Compare commits

1 commit: faf9ceafac
@@ -1,4 +1,5 @@
-// Command zfs-client is a simple CLI tool for creating and sending ZFS snapshots.
+// Command zfs-client is the CLI tool for creating and uploading ZFS snapshots.
+// It provides commands for backup, status checking, snapshot rotation, and incremental backups.
 package main
 
 import (
@@ -21,35 +22,189 @@ func main() {
     command := os.Args[1]
 
     switch command {
-    case "snap", "snapshot":
-        // Create snapshot and send to server (auto full/incremental)
-        // Optional: specify dataset as argument
-        targetDataset := ""
-        if len(os.Args) > 2 {
-            targetDataset = os.Args[2]
-            fmt.Printf("→ Using dataset: %s\n", targetDataset)
-        }
-
-        fmt.Println("=== Creating and sending snapshot ===\n")
-
-        snapshot, err := c.CreateAndSend(targetDataset)
-        if err != nil {
-            fmt.Printf("Error: %v\n", err)
-            os.Exit(1)
-        }
-
-        if snapshot.FullBackup {
-            fmt.Println("\n✓ Full backup completed!")
-        } else {
-            fmt.Println("\n✓ Incremental backup completed!")
-        }
-
-    case "status":
-        // Check server connection and quota
-        if err := c.GetStatus(); err != nil {
-            fmt.Printf("Error: %v\n", err)
-            os.Exit(1)
-        }
+    case "backup":
+        // Default: create manual backup (full or incremental)
+        fmt.Println("=== Creating and sending backup ===\n")
+        snapshot, err := c.CreateSnapshot()
+        if err != nil {
+            fmt.Printf("Error creating snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        if err := c.SendSnapshot(snapshot); err != nil {
+            fmt.Printf("Error sending snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        fmt.Println("\n✓ Backup completed successfully!")
+
+    case "backup-full":
+        // Force full backup (no incremental)
+        fmt.Println("=== Creating full backup ===\n")
+
+        snapshot, err := c.CreateSnapshot()
+        if err != nil {
+            fmt.Printf("Error creating snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        if err := c.SendIncremental(snapshot, ""); err != nil {
+            fmt.Printf("Error sending snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        // Create bookmark for future incremental backups
+        if err := c.CreateBookmark(snapshot); err != nil {
+            fmt.Printf("Warning: failed to create bookmark: %v\n", err)
+        }
+
+        fmt.Println("\n✓ Full backup completed successfully!")
+
+    case "backup-incremental":
+        // Incremental backup from last bookmark
+        fmt.Println("=== Creating incremental backup ===\n")
+
+        // Check for existing bookmark
+        lastBookmark, err := c.GetLastBookmark()
+        if err != nil {
+            fmt.Printf("Error checking bookmarks: %v\n", err)
+            os.Exit(1)
+        }
+
+        if lastBookmark == "" {
+            fmt.Println("No existing bookmark found. Use 'backup-full' for initial backup.")
+            os.Exit(1)
+        }
+
+        snapshot, err := c.CreateSnapshot()
+        if err != nil {
+            fmt.Printf("Error creating snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
+            fmt.Printf("Error sending incremental snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        // Create bookmark for future incremental backups
+        if err := c.CreateBookmark(snapshot); err != nil {
+            fmt.Printf("Warning: failed to create bookmark: %v\n", err)
+        }
+
+        fmt.Println("\n✓ Incremental backup completed successfully!")
+
+    case "snapshot":
+        // Create typed snapshots (hourly, daily, weekly, monthly)
+        if len(os.Args) < 3 {
+            fmt.Println("Usage: zfs-client snapshot <hourly|daily|weekly|monthly>")
+            os.Exit(1)
+        }
+
+        snapType := client.SnapshotType(os.Args[2])
+        switch snapType {
+        case client.SnapshotHourly, client.SnapshotDaily, client.SnapshotWeekly, client.SnapshotMonthly:
+            // Valid type
+        default:
+            fmt.Printf("Invalid snapshot type: %s\n", snapType)
+            fmt.Println("Valid types: hourly, daily, weekly, monthly")
+            os.Exit(1)
+        }
+
+        fmt.Printf("=== Creating %s snapshot ===\n\n", snapType)
+
+        snapshot, err := c.CreateSnapshotWithType(snapType)
+        if err != nil {
+            fmt.Printf("Error creating snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        // Check for existing bookmark for incremental
+        lastBookmark, _ := c.GetLastBookmark()
+
+        if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
+            fmt.Printf("Error sending snapshot: %v\n", err)
+            os.Exit(1)
+        }
+
+        // Create bookmark
+        if err := c.CreateBookmark(snapshot); err != nil {
+            fmt.Printf("Warning: failed to create bookmark: %v\n", err)
+        }
+
+        // Rotate local snapshots using server policy if available
+        policy, err := getRotationPolicy(c)
+        if err != nil {
+            fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
+            policy = client.DefaultPolicy()
+        }
+        if err := c.RotateLocalSnapshots(policy); err != nil {
+            fmt.Printf("Warning: failed to rotate snapshots: %v\n", err)
+        }
+
+        fmt.Printf("\n✓ %s snapshot completed successfully!\n", snapType)
+
+    case "rotate":
+        // Rotate local snapshots using server policy if available
+        fmt.Println("=== Rotating local snapshots ===\n")
+
+        policy, err := getRotationPolicy(c)
+        if err != nil {
+            fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
+            policy = client.DefaultPolicy()
+        }
+        if err := c.RotateLocalSnapshots(policy); err != nil {
+            fmt.Printf("Error rotating snapshots: %v\n", err)
+            os.Exit(1)
+        }
+
+        fmt.Println("\n✓ Rotation completed!")
+
+    case "rotate-remote":
+        // Request server to rotate remote snapshots
+        if err := c.RequestRotation(); err != nil {
+            fmt.Printf("Error requesting rotation: %v\n", err)
+            os.Exit(1)
+        }
+
+    case "status":
+        if err := c.GetStatus(); err != nil {
+            fmt.Printf("Error getting status: %v\n", err)
+            os.Exit(1)
+        }
+
+    case "bookmarks":
+        // List bookmarks
+        fmt.Println("=== ZFS Bookmarks ===\n")
+
+        bookmark, err := c.GetLastBookmark()
+        if err != nil {
+            fmt.Printf("Error: %v\n", err)
+            os.Exit(1)
+        }
+
+        if bookmark == "" {
+            fmt.Println("No bookmarks found")
+        } else {
+            fmt.Printf("Last bookmark: %s\n", bookmark)
+        }
+
+    case "change-password":
+        // Change client API key/password
+        if len(os.Args) < 3 {
+            fmt.Println("Usage: zfs-client change-password <new-api-key>")
+            os.Exit(1)
+        }
+        newKey := os.Args[2]
+
+        fmt.Println("=== Changing API Key ===\n")
+        if err := c.ChangePassword(newKey); err != nil {
+            fmt.Printf("Error: %v\n", err)
+            os.Exit(1)
+        }
+
+        fmt.Println("\n✓ API key changed successfully!")
+        fmt.Println("Update your .env file with the new API_KEY value.")
 
     case "help", "-h", "--help":
         printUsage()
@@ -61,22 +216,56 @@ func main() {
     }
 }
 
+// getRotationPolicy fetches the rotation policy from the server.
+// If the server has a policy configured, it must be used.
+// Otherwise, the default policy is returned.
+func getRotationPolicy(c *client.Client) (*client.SnapshotPolicy, error) {
+    serverPolicy, err := c.GetRotationPolicy()
+    if err != nil {
+        return nil, err
+    }
+
+    if serverPolicy.ServerManaged && serverPolicy.RotationPolicy != nil {
+        fmt.Println("  Using server-managed rotation policy")
+        return serverPolicy.RotationPolicy, nil
+    }
+
+    // No server policy, use default
+    fmt.Println("  Using default rotation policy")
+    return client.DefaultPolicy(), nil
+}
+
 func printUsage() {
-    fmt.Println("ZFS Snapshot Backup Client - Simple Version")
-    fmt.Println("\nUsage: zfs-client [command] [dataset]")
+    fmt.Println("ZFS Snapshot Backup Client")
+    fmt.Println("\nUsage: zfs-client [command]")
     fmt.Println("\nCommands:")
-    fmt.Println("  snap [dataset]  - Create snapshot and send to server")
-    fmt.Println("                    If dataset not specified, uses LOCAL_DATASET from config")
-    fmt.Println("  status          - Check server connection and quota")
-    fmt.Println("  help            - Show this help message")
+    fmt.Println("  backup                    - Create snapshot and send (auto incremental if bookmark exists)")
+    fmt.Println("  backup-full               - Create full backup (no incremental)")
+    fmt.Println("  backup-incremental        - Create incremental backup from last bookmark")
+    fmt.Println("  snapshot <type>           - Create typed snapshot (hourly|daily|weekly|monthly)")
+    fmt.Println("  rotate                    - Rotate local snapshots based on retention policy")
+    fmt.Println("  rotate-remote             - Request server to rotate old remote snapshots")
+    fmt.Println("  status                    - Check server status and quota")
+    fmt.Println("  bookmarks                 - List ZFS bookmarks")
+    fmt.Println("  change-password <new-key> - Change client API key")
+    fmt.Println("  help                      - Show this help message")
+    fmt.Println("\nSnapshot Retention Policy (default):")
+    fmt.Println("  Hourly:  24 snapshots")
+    fmt.Println("  Daily:   7 snapshots")
+    fmt.Println("  Weekly:  4 snapshots")
+    fmt.Println("  Monthly: 12 snapshots")
     fmt.Println("\nEnvironment Variables (can be set in .env file):")
    fmt.Println("  CLIENT_ID     - Client identifier (default: client1)")
    fmt.Println("  API_KEY       - API key for authentication (default: secret123)")
    fmt.Println("  SERVER_URL    - Backup server URL (default: http://localhost:8080)")
    fmt.Println("  LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
    fmt.Println("  COMPRESS      - Enable LZ4 compression (default: true)")
+    fmt.Println("  STORAGE_TYPE  - Storage type: s3 or local (default: s3)")
     fmt.Println("\nExamples:")
-    fmt.Println("  zfs-client snap              # Use configured dataset")
-    fmt.Println("  zfs-client snap tank/data    # Backup specific dataset")
-    fmt.Println("  zfs-client status")
+    fmt.Println("  zfs-client backup")
+    fmt.Println("  zfs-client backup-full")
+    fmt.Println("  zfs-client snapshot hourly")
+    fmt.Println("  zfs-client rotate")
+    fmt.Println("  zfs-client change-password mynewsecretkey")
+    fmt.Println("  CLIENT_ID=myclient zfs-client backup")
 }
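Note on the flow above: the bookmark is what makes `backup-incremental` cheap, since only blocks written after the bookmarked snapshot are sent. If that logic were factored into a helper in the same file, it would read roughly as follows; this is a sketch built only from the `client` package calls visible in this diff (`GetLastBookmark`, `CreateSnapshot`, `SendIncremental`, `CreateBookmark`), with the same-file imports assumed:

```go
// runIncrementalBackup mirrors the "backup-incremental" case above.
func runIncrementalBackup(c *client.Client) error {
	lastBookmark, err := c.GetLastBookmark()
	if err != nil {
		return err
	}
	if lastBookmark == "" {
		// Nothing to base the increment on; a full backup must exist first.
		return fmt.Errorf("no bookmark found, run 'backup-full' first")
	}

	snapshot, err := c.CreateSnapshot() // snapshots the configured LOCAL_DATASET
	if err != nil {
		return err
	}
	// Send only the blocks changed since the bookmarked snapshot.
	if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
		return err
	}
	// Bookmark the new snapshot so the next run can be incremental too.
	return c.CreateBookmark(snapshot)
}
```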
@@ -1,4 +1,5 @@
-// Command zfs-restore is a simple CLI tool for restoring ZFS snapshots from a backup server.
+// Command zfs-restore is a CLI tool for restoring ZFS snapshots from a backup server.
+// It provides commands for listing, restoring, and mounting snapshots.
 package main
 
 import (
@@ -22,8 +23,7 @@ func main() {
     command := os.Args[1]
 
     switch command {
-    case "list", "ls":
-        // List available snapshots
+    case "list":
         snapshots, err := client.ListSnapshots()
         if err != nil {
             fmt.Printf("Error: %v\n", err)
@@ -32,13 +32,8 @@ func main() {
         client.DisplaySnapshots(snapshots)
 
     case "restore":
-        // Restore snapshot - can use number or "latest" keyword
-        if len(os.Args) < 3 {
-            fmt.Println("Usage: zfs-restore restore <snapshot-number-or-latest> <target-dataset> [--force]")
-            fmt.Println("\nExamples:")
-            fmt.Println("  zfs-restore restore 1 tank/restored")
-            fmt.Println("  zfs-restore restore latest tank/restored")
-            fmt.Println("  zfs-restore restore latest tank/restored --force")
+        if len(os.Args) < 4 {
+            fmt.Println("Usage: zfs-restore restore <snapshot-number> <target-dataset> [--force]")
             os.Exit(1)
         }
 
@@ -53,80 +48,99 @@ func main() {
             return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
         })
 
-        if len(snapshots) == 0 {
-            fmt.Println("No snapshots available. Run 'zfs-restore list' first.")
+        // Parse snapshot number
+        var snapNum int
+        fmt.Sscanf(os.Args[2], "%d", &snapNum)
+
+        if snapNum < 1 || snapNum > len(snapshots) {
+            fmt.Printf("Invalid snapshot number. Use 'list' to see available snapshots.\n")
             os.Exit(1)
         }
 
-        snapshotArg := os.Args[2]
-        var snapshot *restore.SnapshotMetadata
-
-        if snapshotArg == "latest" {
-            snapshot = snapshots[0]
-            fmt.Printf("→ Restoring latest snapshot from %s\n", snapshot.Timestamp.Format("2006-01-02 15:04:05"))
-        } else {
-            var snapNum int
-            fmt.Sscanf(snapshotArg, "%d", &snapNum)
-
-            if snapNum < 1 || snapNum > len(snapshots) {
-                fmt.Printf("Invalid snapshot number. Use 'zfs-restore list' to see available snapshots.\n")
-                os.Exit(1)
-            }
-            snapshot = snapshots[snapNum-1]
-        }
-
-        // Get target dataset (either from args or prompt)
-        targetDataset := ""
-        force := false
-
-        for i, arg := range os.Args {
-            if arg == "--force" {
-                force = true
-            }
-            if arg != "restore" && arg != snapshotArg && arg != "--force" && targetDataset == "" && i > 2 && arg != os.Args[0] {
-                targetDataset = arg
-            }
-        }
-
-        if targetDataset == "" {
-            fmt.Printf("Target dataset: ")
-            fmt.Scanln(&targetDataset)
-        }
-
-        if targetDataset == "" {
-            fmt.Println("Error: target dataset is required")
+        snapshot := snapshots[snapNum-1]
+        targetDataset := os.Args[3]
+        force := len(os.Args) > 4 && os.Args[4] == "--force"
+
+        if err := client.RestoreSnapshot(snapshot, targetDataset, force); err != nil {
+            fmt.Printf("Error: %v\n", err)
             os.Exit(1)
         }
 
-        if err := client.RestoreSnapshot(snapshot, targetDataset, force, snapshots); err != nil {
+    case "save":
+        if len(os.Args) < 4 {
+            fmt.Println("Usage: zfs-restore save <snapshot-number> <output-file>")
+            os.Exit(1)
+        }
+
+        snapshots, err := client.ListSnapshots()
+        if err != nil {
+            fmt.Printf("Error: %v\n", err)
+            os.Exit(1)
+        }
+
+        sort.Slice(snapshots, func(i, j int) bool {
+            return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
+        })
+
+        var snapNum int
+        fmt.Sscanf(os.Args[2], "%d", &snapNum)
+
+        if snapNum < 1 || snapNum > len(snapshots) {
+            fmt.Printf("Invalid snapshot number.\n")
+            os.Exit(1)
+        }
+
+        snapshot := snapshots[snapNum-1]
+        outputFile := os.Args[3]
+
+        if err := client.RestoreToFile(snapshot, outputFile); err != nil {
             fmt.Printf("Error: %v\n", err)
             os.Exit(1)
         }
 
     case "mount":
-        // Mount a restored dataset to access files
-        if len(os.Args) < 3 {
-            fmt.Println("Usage: zfs-restore mount <dataset> [mountpoint]")
-            fmt.Println("\nExamples:")
-            fmt.Println("  zfs-restore mount tank/restored /mnt/recover")
-            fmt.Println("  zfs-restore mount tank/restored    # interactive")
+        if len(os.Args) < 4 {
+            fmt.Println("Usage: zfs-restore mount <dataset> <mountpoint>")
             os.Exit(1)
         }
 
         dataset := os.Args[2]
-        mountpoint := ""
-
-        if len(os.Args) > 3 {
-            mountpoint = os.Args[3]
-        } else {
-            fmt.Printf("Mountpoint [/mnt/recover]: ")
-            fmt.Scanln(&mountpoint)
-            if mountpoint == "" {
-                mountpoint = "/mnt/recover"
-            }
-        }
-
-        if err := client.MountDataset(dataset, mountpoint); err != nil {
+        mountpoint := os.Args[3]
+
+        if err := client.MountSnapshot(dataset, mountpoint); err != nil {
+            fmt.Printf("Error: %v\n", err)
+            os.Exit(1)
+        }
+
+    case "latest":
+        if len(os.Args) < 3 {
+            fmt.Println("Usage: zfs-restore latest <target-dataset> [--force]")
+            os.Exit(1)
+        }
+
+        snapshots, err := client.ListSnapshots()
+        if err != nil {
+            fmt.Printf("Error: %v\n", err)
+            os.Exit(1)
+        }
+
+        if len(snapshots) == 0 {
+            fmt.Println("No snapshots available")
+            os.Exit(1)
+        }
+
+        // Sort and get latest
+        sort.Slice(snapshots, func(i, j int) bool {
+            return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
+        })
+
+        latest := snapshots[0]
+        targetDataset := os.Args[2]
+        force := len(os.Args) > 3 && os.Args[3] == "--force"
+
+        fmt.Printf("Restoring latest snapshot from %s\n", latest.Timestamp.Format("2006-01-02 15:04:05"))
+
+        if err := client.RestoreSnapshot(latest, targetDataset, force); err != nil {
             fmt.Printf("Error: %v\n", err)
             os.Exit(1)
         }
@@ -142,18 +156,21 @@ func main() {
 }
 
 func printUsage() {
-    fmt.Println("ZFS Snapshot Restore Tool - Simple Version")
-    fmt.Println("\nUsage: zfs-restore [command]")
+    fmt.Println("ZFS Snapshot Restore Tool")
+    fmt.Println("\nUsage: zfs-restore [command] [options]")
     fmt.Println("\nCommands:")
     fmt.Println("  list                                    - List available snapshots")
-    fmt.Println("  restore <#|latest> <dataset> [--force]  - Restore snapshot to ZFS")
-    fmt.Println("  mount <dataset> [mountpoint]            - Mount dataset to recover files")
-    fmt.Println("  help                                    - Show this help message")
-    fmt.Println("\nQuick Examples:")
-    fmt.Println("  zfs-restore list                        - See available backups")
-    fmt.Println("  zfs-restore restore latest tank/data    - Restore most recent backup")
-    fmt.Println("  zfs-restore restore 1 tank/restored     - Restore snapshot #1")
-    fmt.Println("  zfs-restore mount tank/restored /mnt    - Mount to recover files")
+    fmt.Println("  restore <#> <dataset> [--force]         - Restore snapshot to ZFS dataset")
+    fmt.Println("  latest <dataset> [--force]              - Restore most recent snapshot")
+    fmt.Println("  save <#> <file>                         - Save snapshot to file")
+    fmt.Println("  mount <dataset> <mountpoint>            - Mount restored dataset")
+    fmt.Println("  help                                    - Show this help message")
+    fmt.Println("\nExamples:")
+    fmt.Println("  zfs-restore list")
+    fmt.Println("  zfs-restore restore 1 tank/restored")
+    fmt.Println("  zfs-restore latest tank/restored --force")
+    fmt.Println("  zfs-restore save 2 backup.zfs.lz4")
+    fmt.Println("  zfs-restore mount tank/restored /mnt/restore")
     fmt.Println("\nEnvironment Variables (can be set in .env file):")
     fmt.Println("  CLIENT_ID     - Client identifier (default: client1)")
     fmt.Println("  API_KEY       - API key for authentication (default: secret123)")
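For orientation: `RestoreSnapshot(snapshot, targetDataset, force)` ultimately has to feed the downloaded send stream into `zfs receive`, and the client.go diff further down shows the repository already using `zfs recv -F` for forced receives. A minimal sketch of that receiving plumbing, assuming the stream has already been fetched and LZ4-decompressed, and assuming the usual `io`, `os`, and `os/exec` imports (the real `RestoreSnapshot` in this repo may differ in detail):

```go
// receiveStream pipes a ZFS send stream into a target dataset.
// "stream" is assumed to be the already-decompressed snapshot stream.
func receiveStream(stream io.Reader, targetDataset string, force bool) error {
	args := []string{"recv"}
	if force {
		// -F rolls the target back so the stream can be applied,
		// matching the CLI's --force flag.
		args = append(args, "-F")
	}
	args = append(args, targetDataset)

	cmd := exec.Command("zfs", args...)
	cmd.Stdin = stream
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
```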
go.mod (30 lines changed)
@@ -3,32 +3,14 @@ module git.ma-al.com/goc_marek/zfs
 go 1.25.6
 
 require (
-    github.com/a-h/templ v0.3.977
-    github.com/aws/aws-sdk-go-v2 v1.41.1
-    github.com/aws/aws-sdk-go-v2/config v1.32.7
-    github.com/aws/aws-sdk-go-v2/credentials v1.19.7
-    github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
+    github.com/minio/minio-go/v7 v7.0.98
     github.com/mistifyio/go-zfs v2.1.1+incompatible
-    github.com/pierrec/lz4/v4 v4.1.25
     modernc.org/sqlite v1.45.0
 )
 
 require (
-    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
-    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
-    github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
-    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
-    github.com/aws/smithy-go v1.24.0 // indirect
+    github.com/a-h/templ v0.3.977 // indirect
+    github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/dustin/go-humanize v1.0.1 // indirect
     github.com/go-ini/ini v1.67.0 // indirect
     github.com/google/uuid v1.6.0 // indirect
@@ -38,9 +20,10 @@ require (
     github.com/mattn/go-isatty v0.0.20 // indirect
     github.com/minio/crc64nvme v1.1.1 // indirect
     github.com/minio/md5-simd v1.1.2 // indirect
-    github.com/minio/minio-go/v7 v7.0.98 // indirect
     github.com/ncruces/go-strftime v1.0.0 // indirect
     github.com/philhofer/fwd v1.2.0 // indirect
+    github.com/pierrec/lz4/v4 v4.1.25 // indirect
+    github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
     github.com/rs/xid v1.6.0 // indirect
     github.com/tinylib/msgp v1.6.1 // indirect
@@ -48,10 +31,9 @@ require (
     golang.org/x/crypto v0.46.0 // indirect
     golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
     golang.org/x/net v0.48.0 // indirect
-    golang.org/x/sync v0.19.0 // indirect
     golang.org/x/sys v0.39.0 // indirect
     golang.org/x/text v0.32.0 // indirect
-    golang.org/x/tools v0.39.0 // indirect
+    gopkg.in/yaml.v3 v3.0.1 // indirect
     modernc.org/libc v1.67.6 // indirect
     modernc.org/mathutil v1.7.1 // indirect
     modernc.org/memory v1.11.0 // indirect
go.sum (49 lines changed)
@@ -1,49 +1,11 @@
 github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
 github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
-github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
-github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
-github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
-github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
 github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -73,10 +35,14 @@ github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
 github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
 github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
 github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
 github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -98,7 +64,10 @@ golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
 golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
 golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
 golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
 modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
 modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
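Dependency summary: go.mod drops the AWS SDK v2 module tree and promotes minio-go v7 to a direct dependency, templ moves to indirect, and go-spew, go-difflib, testify, and yaml.v3 appear, which is the usual footprint of adding testify-based tests. The server-side S3 writer is not part of this diff, but for reference, streaming an object of unknown length with minio-go v7 looks roughly like the sketch below; the endpoint, bucket, and credential names are placeholders, not values from this repository:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, not taken from this repo.
	mc, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4(os.Getenv("S3_KEY"), os.Getenv("S3_SECRET"), ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Size -1 streams the body as a multipart upload of unknown length,
	// which fits piping `zfs send` output straight into object storage.
	_, err = mc.PutObject(context.Background(), "backups", "tank-data.zfs.lz4",
		os.Stdin, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		log.Fatal(err)
	}
}
```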
@@ -8,6 +8,7 @@ import (
     "fmt"
     "io"
     "net/http"
+    "os"
     "os/exec"
     "strings"
     "time"
@@ -16,15 +17,9 @@ import (
     "github.com/pierrec/lz4/v4"
 )
 
-var uploadUrl = "/upload-stream/"
-
-// SnapshotResult contains the result of a snapshot creation and send operation.
-type SnapshotResult struct {
-    FullBackup bool
-    Snapshot   *zfs.Dataset
-}
-
 // Client handles snapshot backup operations to a remote server.
+// It manages creating local ZFS snapshots and transmitting them
+// to the backup server via HTTP or SSH.
 type Client struct {
     config *Config
 }
@@ -34,58 +29,12 @@ func New(config *Config) *Client {
     return &Client{config: config}
 }
 
-// CreateAndSend creates a snapshot and sends it to the backup server via HTTP.
-// It automatically detects if this is a full or incremental backup:
-//   - If no bookmark exists, does a full backup
-//   - If bookmark exists, does an incremental backup from the bookmark
-// If targetDataset is provided, it overrides the configured dataset.
-func (c *Client) CreateAndSend(targetDataset string) (*SnapshotResult, error) {
-    // Use provided dataset or fall back to config
-    if targetDataset == "" {
-        targetDataset = c.config.LocalDataset
-    }
-
-    // Check for existing bookmark to determine backup type
-    lastBookmark, err := c.GetLastBookmark()
-    if err != nil {
-        return nil, fmt.Errorf("failed to check bookmarks: %v", err)
-    }
-
-    // Create new snapshot
-    snapshot, err := c.CreateSnapshot(targetDataset)
-    if err != nil {
-        return nil, fmt.Errorf("failed to create snapshot: %v", err)
-    }
-
-    isFullBackup := lastBookmark == ""
-    if isFullBackup {
-        fmt.Println("→ No previous backup found, doing FULL backup...")
-        // Send as full (no base)
-        if err := c.SendIncrementalHTTP(snapshot, targetDataset, ""); err != nil {
-            return nil, fmt.Errorf("failed to send snapshot: %v", err)
-        }
-    } else {
-        fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
-        // Send as incremental from bookmark
-        if err := c.SendIncrementalHTTP(snapshot, targetDataset, lastBookmark); err != nil {
-            return nil, fmt.Errorf("failed to send incremental: %v", err)
-        }
-    }
-
-    // Create bookmark for future incremental backups
-    if err := c.CreateBookmark(snapshot); err != nil {
-        fmt.Printf("Warning: failed to create bookmark: %v\n", err)
-    }
-
-    return &SnapshotResult{
-        FullBackup: isFullBackup,
-        Snapshot:   snapshot,
-    }, nil
-}
-
 // CreateSnapshot creates a local ZFS snapshot of the configured dataset.
-func (c *Client) CreateSnapshot(dataset string) (*zfs.Dataset, error) {
-    ds, err := zfs.GetDataset(dataset)
+// The snapshot is named with a timestamp for easy identification.
+// Returns the created snapshot dataset or an error.
+func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
+    // Get the local dataset
+    ds, err := zfs.GetDataset(c.config.LocalDataset)
     if err != nil {
         return nil, fmt.Errorf("failed to get dataset: %v", err)
     }
@@ -109,41 +58,30 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
     return int64(snapshot.Used)
 }
 
-// SendIncrementalHTTP sends a snapshot to the server via HTTP.
-// The server then handles storage (S3 or local ZFS).
-// datasetName should be the ZFS dataset being backed up (e.g., "tank/data")
-func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base string) error {
+// SendSnapshot sends a snapshot to the backup server.
+// It first requests upload authorization, then streams the snapshot
+// using the appropriate method (S3 or ZFS receive).
+func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
     estimatedSize := c.GetSnapshotSize(snapshot)
 
-    // Determine if this is incremental or full
-    isIncremental := base != ""
-
     // Request upload authorization from server
     uploadReq := map[string]interface{}{
         "client_id":      c.config.ClientID,
         "api_key":        c.config.APIKey,
-        "dataset_name":   datasetName,
+        "dataset_name":   c.config.LocalDataset,
         "timestamp":      time.Now().Format(time.RFC3339),
         "compressed":     c.config.Compress,
         "estimated_size": estimatedSize,
-        "incremental":    isIncremental,
-        "base_snapshot":  base,
     }
 
     reqBody, _ := json.Marshal(uploadReq)
-    uploadURL := c.config.ServerURL
-    // Ensure proper URL format
-    if !strings.HasSuffix(uploadURL, "/") {
-        uploadURL += "/"
-    }
-    uploadURL += "upload"
-
-    resp, err := http.Post(uploadURL, "application/json", bytes.NewBuffer(reqBody))
+    resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
     if err != nil {
         return fmt.Errorf("failed to request upload: %v", err)
     }
     defer resp.Body.Close()
 
+    // Parse server response
     var uploadResp struct {
         Success bool   `json:"success"`
         Message string `json:"message"`
@@ -151,6 +89,7 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base st
         UploadMethod string `json:"upload_method"`
         StorageKey   string `json:"storage_key"`
     }
+
     if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
         return fmt.Errorf("failed to decode response: %v", err)
     }
@@ -163,24 +102,20 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base st
     fmt.Printf("  Method: %s\n", uploadResp.UploadMethod)
     fmt.Printf("  Storage key: %s\n", uploadResp.StorageKey)
 
-    // Stream to server via HTTP
-    return c.streamToServer(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
+    // Choose upload method based on server response
+    if uploadResp.UploadMethod == "s3" {
+        return c.streamToS3(snapshot, uploadResp.UploadURL, uploadResp.StorageKey)
+    }
+    return c.sendViaZFS(snapshot, uploadResp.StorageKey)
 }
 
-// streamToServer streams a ZFS snapshot to the backup server via HTTP.
-func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
-    fmt.Printf("→ Streaming snapshot to server...\n")
+// streamToS3 streams a ZFS snapshot to S3 storage via HTTP.
+// The snapshot is optionally compressed with LZ4 before transmission.
+func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
+    fmt.Printf("→ Streaming snapshot to S3...\n")
 
     // Create ZFS send command
-    var cmd *exec.Cmd
-    if base != "" {
-        // Incremental send from bookmark or snapshot
-        cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
-    } else {
-        // Full send
-        cmd = exec.Command("zfs", "send", snapshot.Name)
-    }
-
+    cmd := exec.Command("zfs", "send", snapshot.Name)
     zfsOut, err := cmd.StdoutPipe()
     if err != nil {
         return fmt.Errorf("failed to create pipe: %v", err)
@@ -197,10 +132,12 @@ func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageK
         fmt.Printf("  Compressing with LZ4...\n")
         pr, pw := io.Pipe()
         lz4Writer := lz4.NewWriter(pw)
-        lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks
+        lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
 
         go func() {
+            // Copy zfs output to LZ4 writer
             io.Copy(lz4Writer, zfsOut)
+            // Close LZ4 writer first to flush, then close pipe
             lz4Writer.Close()
             pw.Close()
         }()
@@ -208,45 +145,25 @@ func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageK
         reader = pr
     }
 
-    // Create HTTP request to server
-    // Build full URL properly - check if uploadURL is already full URL
-    fullURL := uploadURL
-    // If uploadURL is a relative path, prepend server URL
-    if !strings.HasPrefix(uploadURL, "http://") && !strings.HasPrefix(uploadURL, "https://") {
-        fullURL = c.config.ServerURL
-        // Remove trailing slash from base URL if present
-        fullURL = strings.TrimRight(fullURL, "/")
-        // Add leading slash to upload URL if not present
-        if !strings.HasPrefix(uploadURL, "/") {
-            uploadURL = "/" + uploadURL
-        }
-        fullURL += uploadURL
-    }
-
-    fmt.Printf("  Streaming to: %s\n", fullURL)
-
-    req, err := http.NewRequest("POST", fullURL, reader)
+    // Create HTTP request
+    req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
     if err != nil {
         return fmt.Errorf("failed to create request: %v", err)
     }
 
-    // Set headers
+    // Set required headers
     req.Header.Set("X-API-Key", c.config.APIKey)
     req.Header.Set("X-Storage-Key", storageKey)
     req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
     req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
-    req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
-    if base != "" {
-        req.Header.Set("X-Base-Snapshot", base)
-    }
     req.Header.Set("Content-Type", "application/octet-stream")
 
     // Send request with no timeout for large uploads
-    httpClient := &http.Client{
+    client := &http.Client{
         Timeout: 0,
     }
 
-    httpResp, err := httpClient.Do(req)
+    httpResp, err := client.Do(req)
     if err != nil {
         cmd.Process.Kill()
         return fmt.Errorf("failed to upload: %v", err)
@@ -283,7 +200,42 @@ func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageK
     return nil
 }
 
+// sendViaZFS sends a snapshot via traditional ZFS send/receive over SSH.
+// This method is used when the server uses local ZFS storage.
+func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
+    fmt.Printf("→ Sending via ZFS send/receive...\n")
+
+    // Extract server host from URL
+    serverHost := c.config.ServerURL
+    if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
+        serverHost = serverHost[7:]
+    } else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
+        serverHost = serverHost[8:]
+    }
+
+    // Remove port if present
+    if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
+        serverHost = serverHost[:idx]
+    }
+
+    // Execute ZFS send over SSH
+    cmd := exec.Command("sh", "-c",
+        fmt.Sprintf("zfs send %s | ssh %s 'zfs recv -F %s'",
+            snapshot.Name, serverHost, receivePath))
+
+    cmd.Stdout = os.Stdout
+    cmd.Stderr = os.Stderr
+
+    if err := cmd.Run(); err != nil {
+        return fmt.Errorf("failed to send snapshot: %v", err)
+    }
+
+    fmt.Printf("✓ Snapshot sent successfully!\n")
+    return nil
+}
+
 // GetStatus retrieves and displays the client's backup status from the server.
+// Shows storage usage, quota, and snapshot count.
 func (c *Client) GetStatus() error {
     url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
         c.config.ServerURL, c.config.ClientID, c.config.APIKey)
@@ -321,3 +273,106 @@ func (c *Client) GetStatus() error {
 
     return nil
 }
+
+// RequestRotation asks the server to rotate old snapshots.
+// This deletes the oldest snapshots to free up space.
+func (c *Client) RequestRotation() error {
+    reqBody, _ := json.Marshal(map[string]string{
+        "client_id": c.config.ClientID,
+        "api_key":   c.config.APIKey,
+    })
+
+    resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
+    if err != nil {
+        return fmt.Errorf("failed to request rotation: %v", err)
+    }
+    defer resp.Body.Close()
+
+    var rotateResp struct {
+        Success        bool  `json:"success"`
+        DeletedCount   int   `json:"deleted_count"`
+        ReclaimedBytes int64 `json:"reclaimed_bytes"`
+    }
+
+    if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
+        return fmt.Errorf("failed to decode response: %v", err)
+    }
+
+    if !rotateResp.Success {
+        return fmt.Errorf("rotation failed")
+    }
+
+    fmt.Printf("✓ Rotation complete\n")
+    fmt.Printf("  Deleted: %d snapshots\n", rotateResp.DeletedCount)
+    fmt.Printf("  Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))
+
+    return nil
+}
+
+// ServerRotationPolicy represents the rotation policy response from the server
+type ServerRotationPolicy struct {
+    Success        bool            `json:"success"`
+    Message        string          `json:"message"`
+    RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
+    ServerManaged  bool            `json:"server_managed"`
+}
+
+// GetRotationPolicy fetches the rotation policy from the server.
+// If the server has a policy configured for this client, it must be used.
+// Returns the policy and whether it's server-managed (mandatory).
+func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
+    url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
+        c.config.ServerURL, c.config.ClientID, c.config.APIKey)
+
+    resp, err := http.Get(url)
+    if err != nil {
+        return nil, fmt.Errorf("failed to get rotation policy: %v", err)
+    }
+    defer resp.Body.Close()
+
+    var policyResp ServerRotationPolicy
+    if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
+        return nil, fmt.Errorf("failed to decode response: %v", err)
+    }
+
+    if !policyResp.Success {
+        return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
+    }
+
+    return &policyResp, nil
+}
+
+// ChangePassword changes the client's API key on the server.
+// Requires the current API key for authentication and the new key.
+func (c *Client) ChangePassword(newAPIKey string) error {
+    reqBody, _ := json.Marshal(map[string]string{
+        "client_id":   c.config.ClientID,
+        "current_key": c.config.APIKey,
+        "new_key":     newAPIKey,
+    })
+
+    resp, err := http.Post(c.config.ServerURL+"/client/change-password", "application/json", bytes.NewBuffer(reqBody))
+    if err != nil {
+        return fmt.Errorf("failed to change password: %v", err)
+    }
+    defer resp.Body.Close()
+
+    var result struct {
+        Success bool   `json:"success"`
+        Message string `json:"message"`
+    }
+
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        return fmt.Errorf("failed to decode response: %v", err)
+    }
+
+    if !result.Success {
+        return fmt.Errorf("failed to change password: %s", result.Message)
+    }
+
+    // Update local config with new key
+    c.config.APIKey = newAPIKey
+
+    fmt.Printf("✓ Password changed successfully\n")
+    return nil
+}
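The pieces above compose into a short happy path for a caller. A hedged usage sketch follows; the import path is an assumption derived from the go.mod module path, since only the package name `client` is certain from this diff:

```go
package main

import (
	"fmt"
	"os"

	// Assumed import path (module path + package name); not shown in the diff.
	"git.ma-al.com/goc_marek/zfs/client"
)

func main() {
	c := client.New(client.LoadConfig())

	snap, err := c.CreateSnapshot() // snapshot of the configured LOCAL_DATASET
	if err != nil {
		fmt.Fprintf(os.Stderr, "snapshot failed: %v\n", err)
		os.Exit(1)
	}
	// SendSnapshot asks the server for an upload slot, then streams the
	// snapshot via S3 or zfs send/receive, depending on the server's answer.
	if err := c.SendSnapshot(snap); err != nil {
		fmt.Fprintf(os.Stderr, "upload failed: %v\n", err)
		os.Exit(1)
	}
}
```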
@@ -9,6 +9,7 @@ import (
 )
 
 // Config holds client-side configuration for connecting to the backup server.
+// Note: Storage type is determined by the server, not the client.
 type Config struct {
     // ClientID is the unique identifier for this client
     ClientID string `json:"client_id"`
@@ -19,8 +20,7 @@ type Config struct {
     // LocalDataset is the ZFS dataset to backup
     LocalDataset string `json:"local_dataset"`
     // Compress enables LZ4 compression for transfers
     Compress bool `json:"compress"`
-    UploadURL string `json:upload_url`
 }
 
 // LoadConfig loads client configuration from environment variables and .env file.
@@ -32,10 +32,9 @@ func LoadConfig() *Config {
     return &Config{
         ClientID:     getEnv("CLIENT_ID", "client1"),
         APIKey:       getEnv("API_KEY", "secret123"),
-        ServerURL:    getEnv("SERVER_URL", "http://localhost:8080"),
+        ServerURL:    getEnv("SERVER_URL", "http://backup-server:8080"),
         LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
         Compress:     getEnv("COMPRESS", "true") == "true",
-        UploadURL:    "/upload-stream/",
     }
 }
|||||||
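Put together, a client-side .env matching these defaults could look like the following; only the variable names come from the getEnv calls above, the values are placeholders:

    CLIENT_ID=client1
    API_KEY=secret123
    SERVER_URL=http://backup-server:8080
    LOCAL_DATASET=tank/data
    COMPRESS=true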
@@ -1,15 +1,77 @@
 // Package client provides ZFS snapshot backup client functionality.
-// This file contains snapshot management functions for creating and sending snapshots.
+// This file contains snapshot management functions including creation,
+// bookmarking, and rotation similar to zfs-auto-snapshot.
 package client

 import (
+    "bytes"
+    "encoding/json"
     "fmt"
+    "io"
+    "net/http"
+    "os"
     "os/exec"
+    "sort"
     "strings"
+    "time"

     "github.com/mistifyio/go-zfs"
+    "github.com/pierrec/lz4/v4"
 )

+// SnapshotPolicy defines retention settings for automatic snapshots.
+type SnapshotPolicy struct {
+    // KeepHourly is the number of hourly snapshots to keep
+    KeepHourly int
+    // KeepDaily is the number of daily snapshots to keep
+    KeepDaily int
+    // KeepWeekly is the number of weekly snapshots to keep
+    KeepWeekly int
+    // KeepMonthly is the number of monthly snapshots to keep
+    KeepMonthly int
+}
+
+// DefaultPolicy returns the default snapshot retention policy.
+func DefaultPolicy() *SnapshotPolicy {
+    return &SnapshotPolicy{
+        KeepHourly:  24,
+        KeepDaily:   7,
+        KeepWeekly:  4,
+        KeepMonthly: 12,
+    }
+}
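A caller that wants a different retention schedule can build the struct directly instead of calling DefaultPolicy; the numbers below are illustrative only:

    policy := &SnapshotPolicy{
        KeepHourly:  48, // two days of hourly snapshots
        KeepDaily:   14,
        KeepWeekly:  8,
        KeepMonthly: 6,
    }
    // With DefaultPolicy(), at most 24+7+4+12 = 47 automatic snapshots
    // are retained per dataset; manual snapshots are always kept.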

+// SnapshotType represents the type of snapshot (hourly, daily, etc.)
+type SnapshotType string
+
+const (
+    SnapshotHourly  SnapshotType = "hourly"
+    SnapshotDaily   SnapshotType = "daily"
+    SnapshotWeekly  SnapshotType = "weekly"
+    SnapshotMonthly SnapshotType = "monthly"
+    SnapshotManual  SnapshotType = "manual"
+)
+
+// CreateSnapshotWithType creates a snapshot with a specific type label.
+// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
+func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
+    ds, err := zfs.GetDataset(c.config.LocalDataset)
+    if err != nil {
+        return nil, fmt.Errorf("failed to get dataset: %v", err)
+    }
+
+    timestamp := time.Now().Format("2006-01-02_15-04-05")
+    snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
+
+    snapshot, err := ds.Snapshot(snapshotName, false)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create snapshot: %v", err)
+    }
+
+    fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
+    return snapshot, nil
+}
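A short usage sketch; error handling is trimmed and the exact scheduling (cron, timer loop) is a deployment choice outside this function:

    snap, err := c.CreateSnapshotWithType(SnapshotHourly)
    if err != nil {
        return err
    }
    // Produces e.g. tank/data@zfs-backup-hourly-2025-01-02_15-04-05,
    // which parseSnapshotType below later classifies as SnapshotHourly.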

 // CreateBookmark creates a ZFS bookmark from a snapshot.
 // Bookmarks allow incremental sends even after the source snapshot is deleted.
 func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
@@ -71,7 +133,297 @@ func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
     return snapshots[len(snapshots)-1], nil
 }

-// SendIncremental is kept for API compatibility - now just calls HTTP version
-func (c *Client) SendIncremental(snapshot *zfs.Dataset, datasetName, base string) error {
-    return c.SendIncrementalHTTP(snapshot, datasetName, base)
+// SendIncremental sends an incremental stream from a bookmark or snapshot.
+// If base is empty, sends a full stream.
+func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
+    estimatedSize := c.GetSnapshotSize(snapshot)
+
+    // Determine if this is incremental or full
+    isIncremental := base != ""
+    var uploadMethod string
+    if isIncremental {
+        uploadMethod = "incremental"
+    } else {
+        uploadMethod = "full"
+    }
+
+    // Request upload authorization from server
+    uploadReq := map[string]interface{}{
+        "client_id":      c.config.ClientID,
+        "api_key":        c.config.APIKey,
+        "dataset_name":   c.config.LocalDataset,
+        "timestamp":      time.Now().Format(time.RFC3339),
+        "compressed":     c.config.Compress,
+        "estimated_size": estimatedSize,
+        "incremental":    isIncremental,
+        "base_snapshot":  base,
+    }
+
+    reqBody, _ := json.Marshal(uploadReq)
+    resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
+    if err != nil {
+        return fmt.Errorf("failed to request upload: %v", err)
+    }
+    defer resp.Body.Close()
+
+    var uploadResp struct {
+        Success      bool   `json:"success"`
+        Message      string `json:"message"`
+        UploadURL    string `json:"upload_url"`
+        UploadMethod string `json:"upload_method"`
+        StorageKey   string `json:"storage_key"`
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
+        return fmt.Errorf("failed to decode response: %v", err)
+    }
+
+    if !uploadResp.Success {
+        return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
+    }
+
+    fmt.Printf("→ Upload authorized\n")
+    fmt.Printf("  Method: %s\n", uploadResp.UploadMethod)
+    fmt.Printf("  Type: %s\n", uploadMethod)
+    fmt.Printf("  Storage key: %s\n", uploadResp.StorageKey)
+
+    // Choose upload method based on server response
+    if uploadResp.UploadMethod == "s3" {
+        return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
+    }
+    return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
+}
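The intended call pattern, inferred from the backup-full and backup-incremental commands in main: a full send seeds the chain, a bookmark records the base, and later sends reference that bookmark. A sketch:

    // Initial full backup (base == "" means a full stream)
    if err := c.SendIncremental(snap, ""); err != nil {
        return err
    }
    _ = c.CreateBookmark(snap)

    // Later: incremental against the recorded bookmark
    base, err := c.GetLastBookmark()
    if err != nil || base == "" {
        return fmt.Errorf("no bookmark; run a full backup first")
    }
    if err := c.SendIncremental(newSnap, base); err != nil {
        return err
    }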

+// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
+func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
+    fmt.Printf("→ Streaming snapshot to S3...\n")
+
+    // Create ZFS send command
+    var cmd *exec.Cmd
+    if base != "" {
+        // Incremental send from bookmark or snapshot
+        fmt.Printf("  Base: %s\n", base)
+        cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
+    } else {
+        // Full send
+        cmd = exec.Command("zfs", "send", snapshot.Name)
+    }
+
+    zfsOut, err := cmd.StdoutPipe()
+    if err != nil {
+        return fmt.Errorf("failed to create pipe: %v", err)
+    }
+
+    if err := cmd.Start(); err != nil {
+        return fmt.Errorf("failed to start zfs send: %v", err)
+    }
+
+    var reader io.Reader = zfsOut
+
+    // Apply LZ4 compression if enabled
+    if c.config.Compress {
+        fmt.Printf("  Compressing with LZ4...\n")
+        pr, pw := io.Pipe()
+        lz4Writer := lz4.NewWriter(pw)
+        lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
+
+        go func() {
+            // Copy zfs output to LZ4 writer
+            io.Copy(lz4Writer, zfsOut)
+            // Close LZ4 writer first to flush, then close pipe
+            lz4Writer.Close()
+            pw.Close()
+        }()
+
+        reader = pr
+    }
+
+    // Create HTTP request
+    req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
+    if err != nil {
+        return fmt.Errorf("failed to create request: %v", err)
+    }
+
+    // Set required headers
+    req.Header.Set("X-API-Key", c.config.APIKey)
+    req.Header.Set("X-Storage-Key", storageKey)
+    req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
+    req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
+    req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
+    if base != "" {
+        req.Header.Set("X-Base-Snapshot", base)
+    }
+    req.Header.Set("Content-Type", "application/octet-stream")
+
+    // Send request with no timeout for large uploads
+    client := &http.Client{
+        Timeout: 0,
+    }
+
+    httpResp, err := client.Do(req)
+    if err != nil {
+        cmd.Process.Kill()
+        return fmt.Errorf("failed to upload: %v", err)
+    }
+    defer httpResp.Body.Close()
+
+    if httpResp.StatusCode != http.StatusOK {
+        body, _ := io.ReadAll(httpResp.Body)
+        return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
+    }
+
+    if err := cmd.Wait(); err != nil {
+        return fmt.Errorf("zfs send failed: %v", err)
+    }
+
+    // Parse response
+    var result struct {
+        Success bool   `json:"success"`
+        Message string `json:"message"`
+        Size    int64  `json:"size"`
+    }
+    if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
+        return fmt.Errorf("failed to decode response: %v", err)
+    }
+
+    if !result.Success {
+        return fmt.Errorf("upload failed: %s", result.Message)
+    }
+
+    fmt.Printf("✓ Snapshot uploaded successfully!\n")
+    fmt.Printf("  Size: %.2f MB\n", float64(result.Size)/(1024*1024))
+
+    return nil
+}
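Functionally, the compression path above is the Go equivalent of a shell pipeline along these lines; the curl invocation is illustrative only, not what the client actually runs:

    zfs send -i <base> tank/data@snap \
      | lz4 -B7 \
      | curl -X POST -H 'X-API-Key: ...' --data-binary @- http://backup-server:8080/upload-stream/client1

The io.Pipe plus goroutine keeps the whole stream in flight with constant memory; the 4 MB LZ4 block size trades a little buffering for fewer flushes on large streams.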

+// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
+// This method is used when the server uses local ZFS storage.
+func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
+    fmt.Printf("→ Sending via ZFS send/receive...\n")
+
+    // Extract server host from URL
+    serverHost := c.config.ServerURL
+    if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
+        serverHost = serverHost[7:]
+    } else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
+        serverHost = serverHost[8:]
+    }
+
+    // Remove port if present
+    if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
+        serverHost = serverHost[:idx]
+    }
+
+    // Build zfs send command
+    var zfsSendCmd string
+    if base != "" {
+        // Incremental send
+        fmt.Printf("  Base: %s\n", base)
+        zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
+    } else {
+        // Full send
+        zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
+    }
+
+    // Execute ZFS send over SSH
+    cmd := exec.Command("sh", "-c",
+        fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))
+
+    cmd.Stdout = os.Stdout
+    cmd.Stderr = os.Stderr
+
+    if err := cmd.Run(); err != nil {
+        return fmt.Errorf("failed to send snapshot: %v", err)
+    }
+
+    fmt.Printf("Snapshot sent successfully!\n")
+    return nil
+}
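Note that this path shells out to ssh and zfs recv on the server, so the client needs non-interactive SSH access and a remote user with receive rights. If root login is not used, ZFS permission delegation is the usual approach, roughly:

    zfs allow backupuser receive,create,mount backup/clients

The user and dataset names here are placeholders; the exact permission set depends on the server's pool layout.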

+// RotateLocalSnapshots removes old snapshots based on the retention policy.
+// This is similar to zfs-auto-snapshot's rotation behavior.
+func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
+    ds, err := zfs.GetDataset(c.config.LocalDataset)
+    if err != nil {
+        return fmt.Errorf("failed to get dataset: %v", err)
+    }
+
+    snapshots, err := ds.Snapshots()
+    if err != nil {
+        return fmt.Errorf("failed to list snapshots: %v", err)
+    }
+
+    // Group snapshots by type
+    groups := make(map[SnapshotType][]*zfs.Dataset)
+    for _, snap := range snapshots {
+        snapType := parseSnapshotType(snap.Name)
+        groups[snapType] = append(groups[snapType], snap)
+    }
+
+    // Apply retention policy
+    deletedCount := 0
+    keepCount := map[SnapshotType]int{
+        SnapshotHourly:  policy.KeepHourly,
+        SnapshotDaily:   policy.KeepDaily,
+        SnapshotWeekly:  policy.KeepWeekly,
+        SnapshotMonthly: policy.KeepMonthly,
+        SnapshotManual:  -1, // Keep all manual snapshots
+    }
+
+    for snapType, snaps := range groups {
+        maxKeep := keepCount[snapType]
+        if maxKeep < 0 {
+            continue // Keep all
+        }
+
+        // Sort by creation time (oldest first)
+        sortSnapshotsByTime(snaps)
+
+        // Delete oldest snapshots exceeding the limit
+        if len(snaps) > maxKeep {
+            toDelete := snaps[:len(snaps)-maxKeep]
+            for _, snap := range toDelete {
+                fmt.Printf("  Deleting old snapshot: %s\n", snap.Name)
+                if err := snap.Destroy(zfs.DestroyDefault); err != nil {
+                    fmt.Printf("  Warning: failed to delete %s: %v\n", snap.Name, err)
+                } else {
+                    deletedCount++
+                }
+            }
+        }
+    }
+
+    if deletedCount > 0 {
+        fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
+    }
+
+    return nil
+}
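A sketch of wiring rotation into the backup flow; whether it runs after each backup or from a separate cron entry is a deployment choice, not something this function decides:

    if err := c.RotateLocalSnapshots(DefaultPolicy()); err != nil {
        fmt.Printf("Warning: rotation failed: %v\n", err)
    }
    // Only snapshots whose names parse to hourly/daily/weekly/monthly are
    // rotated; anything else is classified as manual and always kept.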

+// parseSnapshotType extracts the snapshot type from the snapshot name.
+func parseSnapshotType(name string) SnapshotType {
+    if strings.Contains(name, "hourly") {
+        return SnapshotHourly
+    }
+    if strings.Contains(name, "daily") {
+        return SnapshotDaily
+    }
+    if strings.Contains(name, "weekly") {
+        return SnapshotWeekly
+    }
+    if strings.Contains(name, "monthly") {
+        return SnapshotMonthly
+    }
+    return SnapshotManual
+}
+
+// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
+// It sorts on the snapshot name, which embeds the timestamp.
+func sortSnapshotsByTime(snaps []*zfs.Dataset) {
+    sort.Slice(snaps, func(i, j int) bool {
+        // Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05.
+        // The timestamp is zero-padded, so lexicographic order on the
+        // name matches chronological order within a type group.
+        return snaps[i].Name < snaps[j].Name
+    })
 }

@@ -119,89 +119,18 @@ func (c *Client) DisplaySnapshots(snapshots []*SnapshotMetadata) {

 // RestoreSnapshot downloads and restores a snapshot to a local ZFS dataset.
 // If force is true, existing datasets will be overwritten.
-func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool, allSnapshots []*SnapshotMetadata) error {
+func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
     fmt.Printf("\n=== Restoring Snapshot ===\n")
     fmt.Printf("Source: %s\n", snapshot.SnapshotID)
     fmt.Printf("Target: %s\n", targetDataset)
     fmt.Printf("Size: %.2f GB\n", float64(snapshot.SizeBytes)/(1024*1024*1024))
     fmt.Printf("Storage: %s\n", snapshot.StorageType)
-    fmt.Printf("Compressed: %v\n", snapshot.Compressed)
-    fmt.Printf("Incremental: %v\n\n", snapshot.Incremental)
+    fmt.Printf("Compressed: %v\n\n", snapshot.Compressed)

-    // For incremental snapshots, we need to restore base first
-    if snapshot.Incremental && snapshot.BaseSnapshot != "" {
-        fmt.Printf("\n⚠ This is an INCREMENTAL backup.\n")
-        fmt.Printf("  Base snapshot needed: %s\n\n", snapshot.BaseSnapshot)
-
-        // Find the base snapshot in the list
-        var baseSnap *SnapshotMetadata
-        for _, s := range allSnapshots {
-            if s.SnapshotID == snapshot.BaseSnapshot {
-                baseSnap = s
-                break
-            }
-        }
-
-        if baseSnap == nil {
-            return fmt.Errorf("base snapshot %s not found on server. Cannot restore incremental without base", snapshot.BaseSnapshot)
-        }
-
-        fmt.Printf("Found base snapshot:\n")
-        fmt.Printf("  - %s from %s (%.2f GB)\n\n",
-            baseSnap.SnapshotID,
-            baseSnap.Timestamp.Format("2006-01-02 15:04:05"),
-            float64(baseSnap.SizeBytes)/(1024*1024*1024))
-
-        fmt.Printf("To restore this incremental, I need to:\n")
-        fmt.Printf("  1. Restore base snapshot: %s\n", baseSnap.SnapshotID)
-        fmt.Printf("  2. Apply incremental: %s\n\n", snapshot.SnapshotID)
-
-        // Ask for confirmation
-        fmt.Printf("Continue? [y/N]: ")
-        var confirm string
-        fmt.Scanln(&confirm)
-        if confirm != "y" && confirm != "Y" {
-            fmt.Println("Cancelled.")
-            return nil
-        }
-
-        // First restore the base snapshot
-        fmt.Printf("\n→ Restoring base snapshot...\n")
-        if err := c.restoreOneSnapshot(baseSnap, targetDataset, true); err != nil {
-            return fmt.Errorf("failed to restore base snapshot: %v", err)
-        }
-
-        // Then apply the incremental
-        fmt.Printf("\n→ Applying incremental snapshot...\n")
-        if err := c.restoreOneSnapshot(snapshot, targetDataset, false); err != nil {
-            return fmt.Errorf("failed to apply incremental: %v", err)
-        }
-
-        fmt.Printf("\n✓ Incremental restore completed!\n")
-        return nil
-    }
-
-    return c.restoreOneSnapshot(snapshot, targetDataset, force)
-}
-
-// restoreOneSnapshot downloads and restores a single snapshot
-func (c *Client) restoreOneSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
-    // First, let's try to download - only destroy if download succeeds
-    var originalExists bool
-    if force {
+    // Check if target dataset exists
+    if !force {
         if _, err := zfs.GetDataset(targetDataset); err == nil {
-            originalExists = true
-            fmt.Printf("→ Target dataset exists, will overwrite\n")
-        } else {
-            originalExists = false
-            fmt.Printf("→ Target dataset does not exist, will create new\n")
-        }
-    } else {
-        // Check if target dataset exists
-        if !force {
-            if _, err := zfs.GetDataset(targetDataset); err == nil {
-                return fmt.Errorf("target dataset %s already exists. Use --force to overwrite", targetDataset)
-            }
+            return fmt.Errorf("target dataset %s already exists. Use --force to overwrite", targetDataset)
         }
     }

@@ -222,18 +151,6 @@ func (c *Client) restoreOneSnapshot(snapshot *SnapshotMetadata, targetDataset st
         return fmt.Errorf("download failed: %s", body)
     }

-    // Download succeeded - now safe to destroy if needed
-    if force && originalExists {
-        fmt.Printf("→ Destroying existing dataset %s...\n", targetDataset)
-        cmd := exec.Command("zfs", "destroy", "-r", targetDataset)
-        output, err := cmd.CombinedOutput()
-        if err != nil {
-            fmt.Printf("  Destroy output: %s\n", string(output))
-            return fmt.Errorf("failed to destroy existing dataset: %v", err)
-        }
-        fmt.Printf("  Destroyed successfully\n")
-    }
-
     // Create decompression reader if needed
     var reader io.Reader = resp.Body
     if snapshot.Compressed {
@@ -263,13 +180,6 @@ func (c *Client) restoreOneSnapshot(snapshot *SnapshotMetadata, targetDataset st
     fmt.Printf("\n✓ Snapshot restored successfully!\n")
     fmt.Printf("  Dataset: %s\n", targetDataset)

-    // Verify the dataset exists after restore
-    if _, err := zfs.GetDataset(targetDataset); err == nil {
-        fmt.Printf("  Verified: dataset exists\n")
-    } else {
-        fmt.Printf("  Warning: could not verify dataset exists: %v\n", err)
-    }
-
     return nil
 }

@@ -318,22 +228,15 @@ func (c *Client) RestoreToFile(snapshot *SnapshotMetadata, outputFile string) er
     return nil
 }

-// MountDataset mounts a restored dataset to a specified mountpoint for file recovery.
-func (c *Client) MountDataset(dataset, mountpoint string) error {
-    fmt.Printf("\n=== Mounting Dataset ===\n")
-    fmt.Printf("Dataset: %s\n", dataset)
-    fmt.Printf("Mountpoint: %s\n\n", mountpoint)
+// MountSnapshot mounts a restored dataset to a specified mountpoint.
+// This allows browsing the restored files.
+func (c *Client) MountSnapshot(dataset, mountpoint string) error {

     ds, err := zfs.GetDataset(dataset)
     if err != nil {
         return fmt.Errorf("dataset not found: %v", err)
     }

-    // Check current mountpoint
-    currentMP, _ := ds.GetProperty("mountpoint")
-    fmt.Printf("Current mountpoint: %s\n", currentMP)
-
-    // Create mountpoint directory if it doesn't exist
+    // Create mountpoint if it doesn't exist
     if err := os.MkdirAll(mountpoint, 0755); err != nil {
         return fmt.Errorf("failed to create mountpoint: %v", err)
     }
@@ -343,17 +246,13 @@ func (c *Client) MountDataset(dataset, mountpoint string) error {
         return fmt.Errorf("failed to set mountpoint: %v", err)
     }

-    // Mount the dataset if not already mounted
+    // Mount the dataset
     cmd := exec.Command("zfs", "mount", dataset)
     if err := cmd.Run(); err != nil {
-        // Might already be mounted, that's OK
-        fmt.Printf("  (dataset may already be mounted)\n")
+        return fmt.Errorf("failed to mount: %v", err)
     }

-    fmt.Printf("\n✓ Mounted successfully!\n")
-    fmt.Printf("  Access files at: %s\n", mountpoint)
-    fmt.Printf("  When done, run: umount %s\n", mountpoint)
+    fmt.Printf("✓ Mounted %s at %s\n", dataset, mountpoint)

     return nil
 }
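Taken together, the simplified restore path now looks like this from a caller's perspective; fetching the metadata is assumed from the surrounding client code, and the target names are placeholders:

    // snapshot is a *SnapshotMetadata previously fetched from the server
    if err := c.RestoreSnapshot(snapshot, "tank/restore", false); err != nil {
        fmt.Printf("Error restoring: %v\n", err)
        os.Exit(1)
    }
    if err := c.MountSnapshot("tank/restore", "/mnt/restore"); err != nil {
        fmt.Printf("Error mounting: %v\n", err)
        os.Exit(1)
    }
    // Browse files under /mnt/restore; unmount with: zfs unmount tank/restore

Note that with this change the incremental chain handling (restoring a base before applying an incremental) is gone, so callers should restore full snapshots only.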

@@ -8,8 +8,6 @@ import (
     "fmt"
     "io/fs"
     "net/http"
-    "strconv"
-    "strings"
     "time"

     "git.ma-al.com/goc_marek/zfs/internal/server/templates/pages"
@@ -482,8 +480,6 @@ func (s *Server) handleAdminDeleteSnapshot(w http.ResponseWriter, r *http.Reques
     if snap != nil {
         if snap.StorageType == "s3" && s.s3Backend != nil {
             s.s3Backend.Delete(context.Background(), snap.StorageKey)
-        } else if snap.StorageType == "local" && s.localBackend != nil {
-            s.localBackend.Delete(context.Background(), snap.StorageKey)
         }
     }
@@ -520,118 +516,6 @@ func (s *Server) handleAdminGetStats(w http.ResponseWriter, r *http.Request) {
     })
 }

-// handleAdminGetDatasets returns all datasets, optionally filtered by client
-func (s *Server) handleAdminGetDatasets(w http.ResponseWriter, r *http.Request) {
-    admin, err := s.authenticateAdmin(r)
-    if err != nil || admin == nil {
-        http.Error(w, "Unauthorized", http.StatusUnauthorized)
-        return
-    }
-
-    clientID := r.URL.Query().Get("client_id")
-
-    var datasets []*DatasetConfig
-    if clientID != "" {
-        datasets, _ = s.db.GetDatasetsByClient(clientID)
-    } else {
-        datasets, _ = s.db.GetAllDatasets()
-    }
-
-    // Get snapshot counts for each dataset
-    type DatasetResponse struct {
-        ID            int64  `json:"id"`
-        ClientID      string `json:"client_id"`
-        DatasetName   string `json:"dataset_name"`
-        StorageType   string `json:"storage_type"`
-        Enabled       bool   `json:"enabled"`
-        SnapshotCount int    `json:"snapshot_count"`
-    }
-
-    response := make([]DatasetResponse, len(datasets))
-    for i, d := range datasets {
-        snapshotCount, _ := s.db.GetSnapshotCountByDataset(d.ClientID, d.DatasetName)
-        response[i] = DatasetResponse{
-            ID:            d.ID,
-            ClientID:      d.ClientID,
-            DatasetName:   d.DatasetName,
-            StorageType:   d.StorageType,
-            Enabled:       d.Enabled,
-            SnapshotCount: snapshotCount,
-        }
-    }
-
-    w.Header().Set("Content-Type", "application/json")
-    json.NewEncoder(w).Encode(response)
-}
-
-// handleAdminUpdateDeleteDataset handles PUT and DELETE for a specific dataset
-func (s *Server) handleAdminUpdateDeleteDataset(w http.ResponseWriter, r *http.Request) {
-    admin, err := s.authenticateAdmin(r)
-    if err != nil || admin == nil {
-        http.Error(w, "Unauthorized", http.StatusUnauthorized)
-        return
-    }
-
-    // Extract dataset ID from URL
-    parts := strings.Split(r.URL.Path, "/")
-    if len(parts) < 4 {
-        http.Error(w, "Invalid URL", http.StatusBadRequest)
-        return
-    }
-    datasetID, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
-    if err != nil {
-        http.Error(w, "Invalid dataset ID", http.StatusBadRequest)
-        return
-    }
-
-    // Get dataset from database
-    dataset, err := s.db.GetDatasetByID(datasetID)
-    if err != nil || dataset == nil {
-        http.Error(w, "Dataset not found", http.StatusNotFound)
-        return
-    }
-
-    if r.Method == http.MethodDelete {
-        // Delete dataset
-        if err := s.db.DeleteDataset(datasetID); err != nil {
-            http.Error(w, "Failed to delete dataset", http.StatusInternalServerError)
-            return
-        }
-        w.Header().Set("Content-Type", "application/json")
-        json.NewEncoder(w).Encode(map[string]interface{}{
-            "success": true,
-            "message": "Dataset deleted successfully",
-        })
-        return
-    }
-
-    if r.Method == http.MethodPut {
-        // Update dataset
-        var req struct {
-            Enabled bool `json:"enabled"`
-        }
-        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-            http.Error(w, "Invalid request body", http.StatusBadRequest)
-            return
-        }
-
-        dataset.Enabled = req.Enabled
-        if err := s.db.SaveDataset(dataset); err != nil {
-            http.Error(w, "Failed to update dataset", http.StatusInternalServerError)
-            return
-        }
-
-        w.Header().Set("Content-Type", "application/json")
-        json.NewEncoder(w).Encode(map[string]interface{}{
-            "success": true,
-            "message": "Dataset updated successfully",
-        })
-        return
-    }
-
-    http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
-}
-
 // Admin management handlers

 // handleAdminGetAdmins returns all admins

@@ -92,7 +92,6 @@ const adminPanelHTML = `<!DOCTYPE html>

     <div class="tabs">
         <button class="tab active" data-tab="clients" onclick="showTab('clients')">Clients</button>
-        <button class="tab" data-tab="datasets" onclick="showTab('datasets')">Datasets</button>
         <button class="tab" data-tab="snapshots" onclick="showTab('snapshots')">Snapshots</button>
         <button class="tab" data-tab="admins" onclick="showTab('admins')">Admins</button>
     </div>
@@ -122,32 +121,6 @@ const adminPanelHTML = `<!DOCTYPE html>
         </div>
     </div>

-    <div id="datasets-tab" class="hidden">
-        <div class="card">
-            <div class="card-header">
-                <h3>Datasets</h3>
-                <select id="dataset-client-filter" onchange="loadDatasets()">
-                    <option value="">All Clients</option>
-                </select>
-            </div>
-            <div class="card-body">
-                <table>
-                    <thead>
-                        <tr>
-                            <th>Client</th>
-                            <th>Dataset Name</th>
-                            <th>Storage Type</th>
-                            <th>Status</th>
-                            <th>Snapshots</th>
-                            <th>Actions</th>
-                        </tr>
-                    </thead>
-                    <tbody id="datasets-table"></tbody>
-                </table>
-            </div>
-        </div>
-    </div>
-
     <div id="snapshots-tab" class="hidden">
         <div class="card">
             <div class="card-header">
@@ -161,11 +134,9 @@ const adminPanelHTML = `<!DOCTYPE html>
                     <thead>
                         <tr>
                             <th>Client</th>
-                            <th>Dataset</th>
                             <th>Snapshot ID</th>
                             <th>Timestamp</th>
                             <th>Size</th>
-                            <th>Storage</th>
                             <th>Type</th>
                             <th>Actions</th>
                         </tr>
@@ -503,76 +474,6 @@ const adminPanelHTML = `<!DOCTYPE html>
             }
         }

-        // Load datasets
-        async function loadDatasets() {
-            const clientId = document.getElementById('dataset-client-filter').value;
-            const url = '/admin/datasets' + (clientId ? '?client_id=' + clientId : '');
-
-            try {
-                const res = await fetch(url);
-                const datasets = await res.json();
-
-                const tbody = document.getElementById('datasets-table');
-                tbody.innerHTML = datasets.map(d =>
-                    '<tr>' +
-                    '<td>' + d.client_id + '</td>' +
-                    '<td><strong>' + d.dataset_name + '</strong></td>' +
-                    '<td><span class="badge badge-info">' + d.storage_type + '</span></td>' +
-                    '<td>' + (d.enabled ? '<span class="badge badge-success">Enabled</span>' : '<span class="badge badge-danger">Disabled</span>') + '</td>' +
-                    '<td>' + (d.snapshot_count || 0) + '</td>' +
-                    '<td>' +
-                    '<button class="btn btn-sm ' + (d.enabled ? 'btn-danger' : 'btn-success') + '" onclick="toggleDataset(' + d.id + ', ' + !d.enabled + ')">' + (d.enabled ? 'Disable' : 'Enable') + '</button>' +
-                    '<button class="btn btn-sm btn-danger" onclick="deleteDataset(' + d.id + ', \'' + d.dataset_name + '\')">Delete</button>' +
-                    '</td>' +
-                    '</tr>'
-                ).join('');
-
-                // Update client filter if not set
-                if (!clientId) {
-                    const clientsRes = await fetch('/admin/clients');
-                    const clients = await clientsRes.json();
-                    const filter = document.getElementById('dataset-client-filter');
-                    filter.innerHTML = '<option value="">All Clients</option>' +
-                        clients.map(c => '<option value="' + c.client_id + '">' + c.client_id + '</option>').join('');
-                }
-            } catch (e) {
-                console.error('Failed to load datasets:', e);
-            }
-        }
-
-        // Toggle dataset enabled/disabled
-        async function toggleDataset(id, enabled) {
-            try {
-                const res = await fetch('/admin/datasets/' + id, {
-                    method: 'PUT',
-                    headers: {'Content-Type': 'application/json'},
-                    body: JSON.stringify({enabled: enabled})
-                });
-                if (res.ok) {
-                    loadDatasets();
-                } else {
-                    alert('Failed to update dataset');
-                }
-            } catch (e) {
-                console.error('Failed to toggle dataset:', e);
-            }
-        }
-
-        // Delete dataset
-        async function deleteDataset(id, name) {
-            if (!confirm('Delete dataset ' + name + '?')) return;
-            try {
-                const res = await fetch('/admin/datasets/' + id, {method: 'DELETE'});
-                if (res.ok) {
-                    loadDatasets();
-                } else {
-                    alert('Failed to delete dataset');
-                }
-            } catch (e) {
-                console.error('Failed to delete dataset:', e);
-            }
-        }
-
         // Load snapshots
         async function loadSnapshots() {
             const clientId = document.getElementById('snapshot-client-filter').value;
@@ -587,14 +488,12 @@ const adminPanelHTML = `<!DOCTYPE html>
                 const sizeGB = (s.size_bytes / (1024*1024*1024)).toFixed(2);
                 return '<tr>' +
                     '<td>' + s.client_id + '</td>' +
-                    '<td>' + (s.dataset_name || '-') + '</td>' +
                     '<td>' + s.snapshot_id + '</td>' +
                     '<td>' + new Date(s.timestamp).toLocaleString() + '</td>' +
                     '<td>' + sizeGB + ' GB</td>' +
-                    '<td><span class="badge ' + (s.storage_type === 's3' ? 'badge-info' : 'badge-warning') + '">' + s.storage_type + '</span></td>' +
                     '<td>' +
-                    (s.incremental ? '<span class="badge badge-info">Inc</span>' : '<span class="badge badge-success">Full</span>') +
-                    (s.compressed ? ' <span class="badge badge-info">LZ4</span>' : '') +
+                    (s.incremental ? '<span class="badge badge-info">Incremental</span>' : '<span class="badge badge-success">Full</span>') +
+                    (s.compressed ? ' <span class="badge badge-info">Compressed</span>' : '') +
                     '</td>' +
                     '<td><button class="btn btn-sm btn-danger" onclick="deleteSnapshot(\'' + s.client_id + '\', \'' + s.snapshot_id + '\')">Delete</button></td>' +
                     '</tr>';
@@ -635,14 +534,12 @@ const adminPanelHTML = `<!DOCTYPE html>
            document.querySelector('.tab[data-tab="' + tab + '"]').classList.add('active');

            document.getElementById('clients-tab').classList.add('hidden');
-            document.getElementById('datasets-tab').classList.add('hidden');
            document.getElementById('snapshots-tab').classList.add('hidden');
            document.getElementById('admins-tab').classList.add('hidden');
            document.getElementById(tab + '-tab').classList.remove('hidden');

            if (tab === 'snapshots') loadSnapshots();
            if (tab === 'admins') loadAdmins();
-            if (tab === 'datasets') loadDatasets();
        }

        // Modal functions
@@ -13,8 +13,7 @@ type Config struct {
     S3SecretKey  string
     S3BucketName string
     S3UseSSL     bool
     S3Enabled    bool // Enable/disable S3 backend
-    S3Region     string // AWS region
     BaseDataset  string
     DatabasePath string // Path to SQLite database
     Port         string
@@ -41,7 +40,6 @@ func LoadConfig() *Config {
     S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
     S3UseSSL:     getEnv("S3_USE_SSL", "true") != "false",
     S3Enabled:    s3Enabled,
-    S3Region:     getEnv("S3_REGION", "us-east-1"),
     BaseDataset:  getEnv("ZFS_BASE_DATASET", "backup"),
     DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"),
     Port:         getEnv("PORT", "8080"),
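For reference, the server-side environment after this change might be set like this; the values are placeholders, and S3_REGION is no longer read:

    S3_BUCKET=zfs-snapshots
    S3_USE_SSL=true
    ZFS_BASE_DATASET=backup
    DATABASE_PATH=zfs-backup.db
    PORT=8080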

@@ -89,23 +89,6 @@ func (d *Database) initTables() error {
         return fmt.Errorf("failed to create clients table: %v", err)
     }

-    // Datasets table - multiple datasets per client
-    _, err = d.db.Exec(`
-        CREATE TABLE IF NOT EXISTS datasets (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            client_id TEXT NOT NULL,
-            dataset_name TEXT NOT NULL,
-            storage_type TEXT NOT NULL DEFAULT 's3',
-            enabled INTEGER NOT NULL DEFAULT 1,
-            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
-            FOREIGN KEY (client_id) REFERENCES clients(client_id) ON DELETE CASCADE,
-            UNIQUE(client_id, dataset_name)
-        )
-    `)
-    if err != nil {
-        return fmt.Errorf("failed to create datasets table: %v", err)
-    }
-
     // Snapshots table
     _, err = d.db.Exec(`
         CREATE TABLE IF NOT EXISTS snapshots (
@@ -417,162 +400,6 @@ func (d *Database) CreateDefaultClient() error {
     return d.SaveClient(defaultClient)
 }

-// CreateDefaultDataset creates a default dataset for a client if none exists
-func (d *Database) CreateDefaultDataset(clientID, datasetName string) error {
-    datasets, err := d.GetDatasetsByClient(clientID)
-    if err != nil {
-        return err
-    }
-
-    if len(datasets) > 0 {
-        return nil
-    }
-
-    // Create default dataset
-    dataset := &DatasetConfig{
-        ClientID:    clientID,
-        DatasetName: datasetName,
-        StorageType: "s3",
-        Enabled:     true,
-    }
-
-    return d.SaveDataset(dataset)
-}
-
-// DatasetConfig represents a dataset configuration
-type DatasetConfig struct {
-    ID          int64  `json:"id"`
-    ClientID    string `json:"client_id"`
-    DatasetName string `json:"dataset_name"`
-    StorageType string `json:"storage_type"`
-    Enabled     bool   `json:"enabled"`
-}
-
-// GetDatasetsByClient gets all datasets for a client
-func (d *Database) GetDatasetsByClient(clientID string) ([]*DatasetConfig, error) {
-    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ?`
-
-    rows, err := d.db.Query(query, clientID)
-    if err != nil {
-        return nil, err
-    }
-    defer rows.Close()
-
-    var datasets []*DatasetConfig
-    for rows.Next() {
-        dataset := &DatasetConfig{}
-        var enabled int
-
-        err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
-        if err != nil {
-            return nil, err
-        }
-
-        dataset.Enabled = enabled == 1
-        datasets = append(datasets, dataset)
-    }
-
-    return datasets, nil
-}
-
-// GetDatasetByName gets a dataset by client and dataset name
-func (d *Database) GetDatasetByName(clientID, datasetName string) (*DatasetConfig, error) {
-    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ? AND dataset_name = ?`
-
-    row := d.db.QueryRow(query, clientID, datasetName)
-
-    dataset := &DatasetConfig{}
-    var enabled int
-
-    err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
-    if err == sql.ErrNoRows {
-        return nil, nil
-    }
-    if err != nil {
-        return nil, err
-    }
-
-    dataset.Enabled = enabled == 1
-    return dataset, nil
-}
-
-// SaveDataset saves or updates a dataset
-func (d *Database) SaveDataset(dataset *DatasetConfig) error {
-    enabled := 0
-    if dataset.Enabled {
-        enabled = 1
-    }
-
-    if dataset.ID == 0 {
-        // Insert new
-        _, err := d.db.Exec(`INSERT INTO datasets (client_id, dataset_name, storage_type, enabled) VALUES (?, ?, ?, ?)`,
-            dataset.ClientID, dataset.DatasetName, dataset.StorageType, enabled)
-        return err
-    }
-
-    // Update existing
-    _, err := d.db.Exec(`UPDATE datasets SET storage_type = ?, enabled = ? WHERE id = ?`,
-        dataset.StorageType, enabled, dataset.ID)
-    return err
-}
-
-// DeleteDataset deletes a dataset
-func (d *Database) DeleteDataset(id int64) error {
-    _, err := d.db.Exec(`DELETE FROM datasets WHERE id = ?`, id)
-    return err
-}
-
-// GetDatasetByID gets a dataset by ID
-func (d *Database) GetDatasetByID(id int64) (*DatasetConfig, error) {
-    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE id = ?`
-
-    row := d.db.QueryRow(query, id)
-    dataset := &DatasetConfig{}
-    var enabled int
-
-    err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
-    if err != nil {
-        return nil, err
-    }
-
-    dataset.Enabled = enabled == 1
-    return dataset, nil
-}
-
-// GetSnapshotCountByDataset gets snapshot count for a specific dataset
-func (d *Database) GetSnapshotCountByDataset(clientID, datasetName string) (int, error) {
-    var count int
-    err := d.db.QueryRow(`SELECT COUNT(*) FROM snapshots WHERE client_id = ? AND dataset_name = ?`, clientID, datasetName).Scan(&count)
-    return count, err
-}
-
-// GetAllDatasets gets all datasets
-func (d *Database) GetAllDatasets() ([]*DatasetConfig, error) {
-    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets`
-
-    rows, err := d.db.Query(query)
-    if err != nil {
-        return nil, err
-    }
-    defer rows.Close()
-
-    var datasets []*DatasetConfig
-    for rows.Next() {
-        dataset := &DatasetConfig{}
-        var enabled int
-
-        err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
-        if err != nil {
-            return nil, err
-        }
-
-        dataset.Enabled = enabled == 1
-        datasets = append(datasets, dataset)
-    }
-
-    return datasets, nil
-}
-
 // GetSnapshotByID retrieves a specific snapshot
 func (d *Database) GetSnapshotByID(clientID, snapshotID string) (*SnapshotMetadata, error) {
     snap := &SnapshotMetadata{}
|||||||
@@ -126,21 +126,21 @@ func (s *Server) rotateSnapshots(clientID string) (int, int64) {
|
|||||||
return 0, 0
|
return 0, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete snapshots - use correct backend based on each snapshot's storage type
|
// Select appropriate backend
|
||||||
|
var backend StorageBackend
|
||||||
|
if s.s3Backend != nil {
|
||||||
|
backend = s.s3Backend
|
||||||
|
} else if s.localBackend != nil {
|
||||||
|
backend = s.localBackend
|
||||||
|
} else {
|
||||||
|
log.Printf("No storage backend available for rotation")
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete snapshots
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
for _, snap := range toDelete {
|
for _, snap := range toDelete {
|
||||||
// Determine which backend to use for this specific snapshot
|
if err := backend.Delete(ctx, snap.StorageKey); err != nil {
|
||||||
var snapBackend StorageBackend
|
|
||||||
if snap.StorageType == "s3" && s.s3Backend != nil {
|
|
||||||
snapBackend = s.s3Backend
|
|
||||||
} else if snap.StorageType == "local" && s.localBackend != nil {
|
|
||||||
snapBackend = s.localBackend
|
|
||||||
} else {
|
|
||||||
log.Printf("No storage backend available for snapshot %s (type: %s)", snap.SnapshotID, snap.StorageType)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := snapBackend.Delete(ctx, snap.StorageKey); err != nil {
|
|
||||||
log.Printf("Error deleting snapshot %s: %v", snap.StorageKey, err)
|
log.Printf("Error deleting snapshot %s: %v", snap.StorageKey, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -238,7 +238,6 @@ func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
|
|||||||
Message: "Ready to receive snapshot",
|
Message: "Ready to receive snapshot",
|
||||||
UploadMethod: "zfs-receive",
|
UploadMethod: "zfs-receive",
|
||||||
StorageKey: snapshotName,
|
StorageKey: snapshotName,
|
||||||
UploadURL: fmt.Sprintf("/upload-stream/%s", req.ClientID),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -271,36 +270,6 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if dataset is allowed for this client
|
|
||||||
dataset, err := s.db.GetDatasetByName(clientID, datasetName)
|
|
||||||
if err != nil || dataset == nil {
|
|
||||||
// Auto-create dataset if not exists
|
|
||||||
log.Printf("Dataset %s not found for client %s, creating...", datasetName, clientID)
|
|
||||||
newDataset := &DatasetConfig{
|
|
||||||
ClientID: clientID,
|
|
||||||
DatasetName: datasetName,
|
|
||||||
StorageType: "s3",
|
|
||||||
Enabled: true,
|
|
||||||
}
|
|
||||||
if err := s.db.SaveDataset(newDataset); err != nil {
|
|
||||||
log.Printf("Error creating dataset: %v", err)
|
|
||||||
respondJSON(w, http.StatusForbidden, UploadResponse{
|
|
||||||
Success: false,
|
|
||||||
Message: "Dataset not configured for this client",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dataset = newDataset
|
|
||||||
}
|
|
||||||
|
|
||||||
if !dataset.Enabled {
|
|
||||||
respondJSON(w, http.StatusForbidden, UploadResponse{
|
|
||||||
Success: false,
|
|
||||||
Message: "Dataset is disabled",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
// Upload to S3
|
// Upload to S3
|
||||||
@@ -311,52 +280,26 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
|
|||||||
size = -1 // Use streaming upload for unknown size
|
size = -1 // Use streaming upload for unknown size
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.s3Backend == nil && s.localBackend == nil {
|
if s.s3Backend == nil {
|
||||||
log.Printf("Error: No storage backend configured")
|
log.Printf("Error: S3 backend not initialized")
|
||||||
http.Error(w, "No storage backend configured", http.StatusInternalServerError)
|
http.Error(w, "S3 backend not configured", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine storage type based on client configuration
|
if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
|
||||||
client, err := s.db.GetClient(clientID)
|
log.Printf("Error uploading to S3: %v", err)
|
||||||
if err != nil || client == nil {
|
http.Error(w, "Upload failed", http.StatusInternalServerError)
|
||||||
http.Error(w, "Client not found", http.StatusNotFound)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var actualSize int64
|
// Get actual size after upload
|
||||||
|
actualSize, err := s.s3Backend.GetSize(ctx, storageKey)
|
||||||
// Handle based on storage type
|
if err != nil {
|
||||||
if client.StorageType == "s3" && s.s3Backend != nil {
|
log.Printf("Error getting object size: %v", err)
|
||||||
// Upload to S3
|
|
||||||
if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
|
|
||||||
log.Printf("Error uploading to S3: %v", err)
|
|
||||||
http.Error(w, "Upload failed", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get actual size after upload
|
|
||||||
actualSize, err = s.s3Backend.GetSize(ctx, storageKey)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Error getting object size: %v", err)
|
|
||||||
actualSize = size
|
|
||||||
}
|
|
||||||
} else if client.StorageType == "local" && s.localBackend != nil {
|
|
||||||
// Upload to local ZFS
|
|
||||||
if err := s.localBackend.Receive(storageKey, r.Body, compressedStr == "true"); err != nil {
|
|
||||||
log.Printf("Error uploading to local ZFS: %v", err)
|
|
||||||
http.Error(w, "Upload failed", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
actualSize = size
|
actualSize = size
|
||||||
} else {
|
|
||||||
log.Printf("Error: Storage type %s not configured", client.StorageType)
|
|
||||||
http.Error(w, "Storage type not configured", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save metadata to database
|
// Save metadata to database
|
||||||
// Use actual storage type where snapshot was stored (not always s3)
|
|
||||||
metadata := &SnapshotMetadata{
|
metadata := &SnapshotMetadata{
|
||||||
ClientID: clientID,
|
ClientID: clientID,
|
||||||
SnapshotID: storageKey,
|
SnapshotID: storageKey,
|
||||||
@@ -364,7 +307,7 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
|
|||||||
SizeBytes: actualSize,
|
SizeBytes: actualSize,
|
||||||
DatasetName: datasetName,
|
DatasetName: datasetName,
|
||||||
StorageKey: storageKey,
|
StorageKey: storageKey,
|
||||||
StorageType: client.StorageType, // Use actual storage type from client config
|
StorageType: "s3",
|
||||||
Compressed: compressedStr == "true",
|
Compressed: compressedStr == "true",
|
||||||
Incremental: incrementalStr == "true",
|
Incremental: incrementalStr == "true",
|
||||||
BaseSnapshot: baseSnapshot,
|
BaseSnapshot: baseSnapshot,
|
||||||
@@ -374,19 +317,10 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
|
|||||||
log.Printf("Error saving snapshot metadata: %v", err)
|
log.Printf("Error saving snapshot metadata: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run rotation after successful upload
|
|
||||||
deletedCount, reclaimedBytes := s.rotateSnapshots(clientID)
|
|
||||||
if deletedCount > 0 {
|
|
||||||
log.Printf("Rotation: deleted %d snapshots, reclaimed %.2f MB for client %s",
|
|
||||||
deletedCount, float64(reclaimedBytes)/(1024*1024), clientID)
|
|
||||||
}
|
|
||||||
|
|
||||||
respondJSON(w, http.StatusOK, map[string]interface{}{
|
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||||
"success": true,
|
"success": true,
|
||||||
"message": "Snapshot uploaded successfully",
|
"message": "Snapshot uploaded successfully",
|
||||||
"size": actualSize,
|
"size": actualSize,
|
||||||
"deleted_count": deletedCount,
|
|
||||||
"reclaimed_bytes": reclaimedBytes,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -468,23 +402,28 @@ func (s *Server) HandleDownload(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// Find snapshot metadata
+	client, err := s.db.GetClient(clientID)
+	if err != nil || client == nil {
+		http.Error(w, "Client not found", http.StatusNotFound)
+		return
+	}

 	targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID)
 	if err != nil || targetSnapshot == nil {
 		http.Error(w, "Snapshot not found", http.StatusNotFound)
 		return
 	}

-	// Use snapshot's own storage_type to determine which backend to use
-	// This enables mixed storage scenarios (e.g., full on local, incrementals on S3)
 	ctx := context.Background()
 	var backend StorageBackend

-	if targetSnapshot.StorageType == "s3" && s.s3Backend != nil {
+	if client.StorageType == "s3" && s.s3Backend != nil {
 		backend = s.s3Backend
-	} else if targetSnapshot.StorageType == "local" && s.localBackend != nil {
+	} else if s.localBackend != nil {
 		backend = s.localBackend
 	} else {
-		http.Error(w, "No storage backend available for this snapshot's storage type", http.StatusInternalServerError)
+		http.Error(w, "No storage backend available", http.StatusInternalServerError)
 		return
 	}
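The substantive change in this hunk is the dispatch key: the `-` side selects a backend from the snapshot's own recorded `storage_type`, which is what allows mixed layouts (a full snapshot on local ZFS, incrementals on S3), while the `+` side falls back to the client-level setting. A compact sketch of the per-snapshot dispatch, with a stand-in interface (all names illustrative):

```go
package downloadsketch

import (
	"context"
	"fmt"
	"io"
)

// backend is a minimal stand-in for the project's StorageBackend interface.
type backend interface {
	Download(ctx context.Context, key string) (io.ReadCloser, error)
}

// pickBackend chooses the backend that actually holds a given snapshot,
// using the storage type recorded in its metadata row.
func pickBackend(storageType string, s3, local backend) (backend, error) {
	switch storageType {
	case "s3":
		if s3 != nil {
			return s3, nil
		}
	case "local":
		if local != nil {
			return local, nil
		}
	}
	return nil, fmt.Errorf("no storage backend available for storage type %q", storageType)
}
```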
@@ -583,8 +522,6 @@ func (s *Server) RegisterRoutes(mux *http.ServeMux) {
 	mux.HandleFunc("/admin/snapshots", s.handleAdminGetSnapshots)
 	mux.HandleFunc("/admin/snapshot/delete", s.handleAdminDeleteSnapshot)
 	mux.HandleFunc("/admin/stats", s.handleAdminGetStats)
-	mux.HandleFunc("/admin/datasets", s.handleAdminGetDatasets)
-	mux.HandleFunc("/admin/datasets/{id}", s.handleAdminUpdateDeleteDataset)
 	mux.HandleFunc("/admin/admins", s.handleAdminGetAdmins)
 	mux.HandleFunc("/admin/admin/create", s.handleAdminCreateAdmin)
 	mux.HandleFunc("/admin/admin/delete", s.handleAdminDeleteAdmin)
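One side note on the removed routes: a pattern like `/admin/datasets/{id}` depends on the wildcard matching that `net/http.ServeMux` gained in Go 1.22, where the handler reads the segment via `r.PathValue`. A minimal sketch (the handler body is ours, not the project's):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// {id} wildcards require Go 1.22+; earlier ServeMux versions would
	// treat the pattern as a literal path.
	mux.HandleFunc("/admin/datasets/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "dataset %s\n", r.PathValue("id"))
	})
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```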
@@ -6,15 +6,12 @@ import (
 	"io"
 	"log"
 	"net/http"
-	"os"
 	"os/exec"
-	"strings"
 	"time"

 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/mistifyio/go-zfs"
-	"github.com/pierrec/lz4/v4"
 )

 // StorageBackend defines the interface for different storage types
@@ -26,22 +23,25 @@ type StorageBackend interface {
 	GetSize(ctx context.Context, key string) (int64, error)
 }

-// S3Backend implements StorageBackend for S3-compatible storage using minio-go
+// S3Backend implements StorageBackend for S3-compatible storage
 type S3Backend struct {
 	client     *minio.Client
 	bucketName string
 }

-// NewS3Backend creates a new S3 storage backend using minio-go
+// NewS3Backend creates a new S3 storage backend
 func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
 	// Create custom HTTP transport with extended timeouts for large file uploads
 	transport := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
-		TLSClientConfig: nil,
-		IdleConnTimeout: 90 * time.Second,
+		// Extended timeouts for streaming large ZFS snapshots
+		ResponseHeaderTimeout: 5 * time.Minute,
+		ExpectContinueTimeout: 30 * time.Second,
+		IdleConnTimeout:       90 * time.Second,
 		// Connection pooling
 		MaxIdleConns:        10,
 		MaxIdleConnsPerHost: 10,
+		DisableCompression:  false,
 	}

 	client, err := minio.New(endpoint, &minio.Options{
@@ -74,7 +74,7 @@ func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool
 	}, nil
 }
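The long `ResponseHeaderTimeout` is the load-bearing line in that transport: for a streamed PUT, S3 sends no response headers until it has consumed the entire snapshot body, so any header timeout has to be generous enough to cover the slowest upload. A sketch of how such a transport plugs into minio-go; the `minio.New` call is truncated in this hunk, so the options shown here are illustrative rather than the repository's exact code:

```go
package s3sketch

import (
	"net/http"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func newS3Client(endpoint, accessKey, secretKey string, useSSL bool) (*minio.Client, error) {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		// Headers arrive only after the whole snapshot stream is consumed.
		ResponseHeaderTimeout: 5 * time.Minute,
		ExpectContinueTimeout: 30 * time.Second,
		IdleConnTimeout:       90 * time.Second,
		MaxIdleConns:          10,
		MaxIdleConnsPerHost:   10,
	}
	return minio.New(endpoint, &minio.Options{
		Creds:     credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure:    useSSL,
		Transport: transport,
	})
}
```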
-// Upload uploads data to S3 using minio-go
+// Upload uploads data to S3
 func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
 	_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
 		minio.PutObjectOptions{
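`PutObject` chooses its upload strategy from the size argument. When the body is a pipe from `zfs send` whose length is unknown, minio-go accepts a size of `-1` and switches to multipart streaming. A hedged sketch of that pattern (bucket, key, and helper name are placeholders):

```go
package uploadsketch

import (
	"context"
	"os/exec"

	"github.com/minio/minio-go/v7"
)

// uploadSendStream pipes `zfs send` straight into S3. Size -1 makes
// minio-go use multipart streaming, since a pipe has no known length.
func uploadSendStream(ctx context.Context, mc *minio.Client, bucket, key, snapshot string) error {
	cmd := exec.CommandContext(ctx, "zfs", "send", snapshot)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	_, err = mc.PutObject(ctx, bucket, key, stdout, -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		return err
	}
	return cmd.Wait()
}
```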
@@ -141,41 +141,6 @@ func (l *LocalBackend) Upload(ctx context.Context, key string, data io.Reader, s
 	return fmt.Errorf("local backend upload not supported via storage interface, use zfs receive endpoint")
 }

-// Receive receives a ZFS snapshot stream and restores it to the local dataset
-func (l *LocalBackend) Receive(snapshotName string, data io.Reader, compressed bool) error {
-	// Extract the target dataset from the snapshot name
-	// snapshotName format: dataset@name -> we want just the dataset part
-	parts := strings.Split(snapshotName, "@")
-	if len(parts) != 2 {
-		return fmt.Errorf("invalid snapshot name format: %s", snapshotName)
-	}
-
-	targetDataset := parts[0]
-
-	log.Printf("Receiving ZFS snapshot to %s (compressed: %v)", targetDataset, compressed)
-
-	// If compressed, decompress with LZ4 first
-	var reader io.Reader = data
-	if compressed {
-		lz4Reader := lz4.NewReader(data)
-		reader = lz4Reader
-	}
-
-	// Use go-zfs library to receive the snapshot (with -F force flag)
-	// Note: The library's ReceiveSnapshot doesn't support -F, so we use exec.Command
-	cmd := exec.Command("zfs", "receive", "-F", snapshotName)
-	cmd.Stdin = reader
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-
-	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("zfs receive failed: %v", err)
-	}
-
-	log.Printf("Successfully received snapshot: %s", snapshotName)
-	return nil
-}
-
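For context on the deleted `Receive`: `zfs receive -F` rolls the target dataset back to its most recent snapshot before applying the stream, so local writes on the target since that snapshot are discarded. The sender has to produce the matching LZ4 frame; a sketch of that counterpart under the same framing assumptions (this is not the actual client code):

```go
package clientsketch

import (
	"io"
	"os/exec"

	"github.com/pierrec/lz4/v4"
)

// compressSendStream is the sending-side counterpart of the deleted
// Receive above: it wraps a `zfs send` stream in an LZ4 frame so the
// receiver can decompress with lz4.NewReader before `zfs receive -F`.
func compressSendStream(snapshot string, dst io.Writer) error {
	cmd := exec.Command("zfs", "send", snapshot)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	zw := lz4.NewWriter(dst)
	if _, err := io.Copy(zw, stdout); err != nil {
		return err
	}
	if err := zw.Close(); err != nil { // flush the final LZ4 frame
		return err
	}
	return cmd.Wait()
}
```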
 // Download creates a zfs send stream
 func (l *LocalBackend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
 	cmd := exec.CommandContext(ctx, "zfs", "send", key)

@@ -260,5 +260,4 @@ templ ClientPasswordModal() {
 // AdminScripts renders the JavaScript for the admin panel
 templ AdminScripts() {
 	<script src="/admin/static/admin.js"></script>
-	<script>initTheme();</script>
 }
@@ -19,30 +19,6 @@ async function logout() {
     location.reload();
 }

-// Toggle dark/light theme
-function toggleTheme() {
-    const html = document.documentElement;
-    const isDark = html.classList.contains('dark');
-
-    if (isDark) {
-        html.classList.remove('dark');
-        localStorage.setItem('theme', 'light');
-    } else {
-        html.classList.add('dark');
-        localStorage.setItem('theme', 'dark');
-    }
-}
-
-// Initialize theme on load
-function initTheme() {
-    const savedTheme = localStorage.getItem('theme');
-    const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
-
-    if (savedTheme === 'dark' || (!savedTheme && prefersDark)) {
-        document.documentElement.classList.add('dark');
-    }
-}
-
 // Load stats
 async function loadStats() {
     try {
83 readme.md

@@ -4,14 +4,13 @@ A distributed ZFS snapshot management system with S3-compatible storage support.

 ## Features

-- **S3 Storage Support**: Store snapshots in any S3-compatible storage using AWS SDK v2 (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
+- **S3 Storage Support**: Store snapshots in any S3-compatible storage (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
 - **Local ZFS Storage**: Option to use local ZFS datasets for maximum performance
 - **Multi-client Architecture**: Support for multiple clients with isolated storage and per-client quotas
-- **Automatic Compression**: LZ4 compression for reduced storage costs and faster transfers
+- **Automatic Compression**: Gzip compression for reduced storage costs
 - **Snapshot Rotation**: Automatic cleanup of old snapshots based on quota
 - **Server-Managed Rotation Policies**: Centralized control of client rotation policies - clients must use server-configured retention settings
 - **API Key Authentication**: Secure client-server communication
-- **Simple CLI**: Just use `zfs-client snap` to backup - automatically handles full/incremental
 ## Project Structure

@@ -94,19 +93,11 @@ API_KEY=secret123
 SERVER_URL=http://backup-server:8080
 LOCAL_DATASET=tank/data
 COMPRESS=true

-# Optional: Direct S3 upload (bypasses server storage)
-S3_ENDPOINT=https://s3.amazonaws.com
-S3_REGION=us-east-1
-S3_BUCKET=zfs-backups
-S3_ACCESS_KEY=your_access_key
-S3_SECRET_KEY=your_secret_key
 ```

 > **Important**:
 > - The `API_KEY` in the client `.env` file must be the **raw (unhashed)** key. The server stores the SHA-256 hash in the database.
 > - **Storage type is determined by the server**, not the client. The server decides whether to use S3 or local ZFS storage based on its configuration.
-> - The client automatically handles full vs incremental backups based on whether a bookmark exists.
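The SHA-256 arrangement in the note above is straightforward to make concrete: the server stores only the digest and hashes each incoming raw key before the lookup. A minimal sketch (the server's real helper is not shown in this diff):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hashAPIKey returns the hex-encoded SHA-256 digest that the server
// stores; the client keeps sending the raw key and the server hashes
// it on every request before comparing against the database.
func hashAPIKey(raw string) string {
	sum := sha256.Sum256([]byte(raw))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(hashAPIKey("secret123"))
}
```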
 ### Restore Tool Configuration

@@ -137,18 +128,52 @@ zfs-server

 ### Client Commands

-The `zfs-client` tool provides simple commands for creating and sending ZFS snapshots:
+The `zfs-client` tool provides the following commands for managing ZFS snapshots:

-#### `snap`
+#### `backup`
-Creates a snapshot and sends it to the server. Automatically detects if this is the first backup (full) or subsequent backup (incremental).
+Creates a snapshot and sends it to the server. Automatically uses incremental backup if a bookmark exists.

 ```bash
-zfs-client snap
+zfs-client backup
 ```

-On first run, it will print: `→ No previous backup found, doing FULL backup...`
+#### `backup-full`
+Forces a full backup (no incremental). Use for the initial backup or when you want to resend the complete dataset.

-On subsequent runs, it automatically does incremental backups from the last bookmark.
+```bash
+zfs-client backup-full
+```

+#### `backup-incremental`
+Creates an incremental backup from the last bookmark. Requires an existing bookmark from a previous full backup.

+```bash
+zfs-client backup-incremental
+```
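The bookmark mechanics behind `backup` and `backup-incremental` are plain ZFS: a bookmark (`pool/dataset#name`) retains just enough metadata to serve as an incremental source, so the sent snapshot itself can later be destroyed without breaking the chain. A sketch of the two underlying operations (dataset and names are illustrative, not the client's actual code):

```go
package backupsketch

import (
	"fmt"
	"io"
	"os/exec"
)

// incrementalSend streams the delta since the last bookmark into dst,
// then bookmarks the new snapshot as the base for the next run.
// Shell equivalent: zfs send -i tank/data#last tank/data@new | ...
// followed by: zfs bookmark tank/data@new tank/data#last
func incrementalSend(dst io.Writer, dataset, lastBookmark, newSnap string) error {
	send := exec.Command("zfs", "send", "-i", lastBookmark, newSnap)
	send.Stdout = dst
	if err := send.Run(); err != nil {
		return fmt.Errorf("zfs send failed: %w", err)
	}
	// A real implementation would destroy the previous #last bookmark
	// first, since zfs bookmark refuses to overwrite an existing name.
	return exec.Command("zfs", "bookmark", newSnap, dataset+"#last").Run()
}
```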
+#### `snapshot <type>`
+Creates a typed snapshot (hourly, daily, weekly, monthly) with automatic rotation. The rotation policy is fetched from the server if configured.

+```bash
+zfs-client snapshot hourly
+zfs-client snapshot daily
+zfs-client snapshot weekly
+zfs-client snapshot monthly
+```

+#### `rotate`
+Rotates local snapshots based on the retention policy. If the server has a rotation policy configured, it will be used; otherwise, default values apply.

+```bash
+zfs-client rotate
+```

+#### `rotate-remote`
+Requests the server to rotate (delete old) remote snapshots to free up storage quota.

+```bash
+zfs-client rotate-remote
+```
 #### `status`
 Displays the current backup status including storage usage, quota, and snapshot count from the server.
@@ -157,6 +182,13 @@ Displays the current backup status including storage usage, quota, and snapshot
 zfs-client status
 ```

+#### `bookmarks`
+Lists ZFS bookmarks on the local system. Bookmarks are used as reference points for incremental backups.

+```bash
+zfs-client bookmarks
+```

 #### `help`
 Shows the help message with all available commands and options.

@@ -164,23 +196,6 @@ Shows the help message with all available commands and options.
 zfs-client help
 ```
-### Client Configuration
-
-```env
-CLIENT_ID=client1
-API_KEY=secret123
-SERVER_URL=http://backup-server:8080
-LOCAL_DATASET=tank/data
-COMPRESS=true
-
-# Optional: S3 direct upload (bypasses server)
-S3_ENDPOINT=https://s3.amazonaws.com
-S3_REGION=us-east-1
-S3_BUCKET=zfs-backups
-S3_ACCESS_KEY=your_access_key
-S3_SECRET_KEY=your_secret_key
-```

 ### Restore Tool Commands

 The `zfs-restore` tool provides commands for listing and restoring snapshots from the backup server:
BIN zfs-client: Binary file not shown.
BIN zfs-server: Binary file not shown.