server client restore
This commit is contained in:
29
.env
Normal file
29
.env
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# ===========================================
|
||||||
|
# ZFS Snapshot Manager Configuration
|
||||||
|
# ===========================================
|
||||||
|
|
||||||
|
# S3 Configuration (Server)
|
||||||
|
S3_ENABLED=true
|
||||||
|
S3_ENDPOINT=localhost:9000
|
||||||
|
S3_ACCESS_KEY=minioadmin
|
||||||
|
S3_SECRET_KEY=minioadmin
|
||||||
|
S3_BUCKET=zfs
|
||||||
|
S3_USE_SSL=false
|
||||||
|
|
||||||
|
# Local ZFS Configuration (Server)
|
||||||
|
ZFS_BASE_DATASET=backup
|
||||||
|
CONFIG_FILE=clients.json
|
||||||
|
METADATA_FILE=metadata.json
|
||||||
|
|
||||||
|
# Server Configuration
|
||||||
|
PORT=8080
|
||||||
|
|
||||||
|
# ===========================================
|
||||||
|
# Client Configuration
|
||||||
|
# ===========================================
|
||||||
|
CLIENT_ID=client1
|
||||||
|
API_KEY=fcf730b6d95236ecd3c9fc2d92d7b6b2bb061514961aec041d6c7a7192f592e4
|
||||||
|
SERVER_URL=http://localhost:8080
|
||||||
|
LOCAL_DATASET=zfs/data
|
||||||
|
COMPRESS=true
|
||||||
|
STORAGE_TYPE=s3
|
||||||
16
clients.json
Normal file
16
clients.json
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"client_id": "client1",
|
||||||
|
"api_key": "fcf730b6d95236ecd3c9fc2d92d7b6b2bb061514961aec041d6c7a7192f592e4",
|
||||||
|
"max_size_bytes": 107374182400,
|
||||||
|
"dataset": "zfs/client1",
|
||||||
|
"enabled": true,
|
||||||
|
"storage_type": "s3",
|
||||||
|
"rotation_policy": {
|
||||||
|
"keep_hourly": 24,
|
||||||
|
"keep_daily": 7,
|
||||||
|
"keep_weekly": 4,
|
||||||
|
"keep_monthly": 12
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
253
cmd/zfs-client/main.go
Normal file
253
cmd/zfs-client/main.go
Normal file
@@ -0,0 +1,253 @@
|
|||||||
|
// Command zfs-client is the CLI tool for creating and uploading ZFS snapshots.
|
||||||
|
// It provides commands for backup, status checking, snapshot rotation, and incremental backups.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"git.ma-al.com/goc_marek/zfs/internal/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) < 2 {
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration from environment and .env file
|
||||||
|
config := client.LoadConfig()
|
||||||
|
c := client.New(config)
|
||||||
|
|
||||||
|
command := os.Args[1]
|
||||||
|
|
||||||
|
switch command {
|
||||||
|
case "backup":
|
||||||
|
// Default: create manual backup (full or incremental)
|
||||||
|
fmt.Println("=== Creating and sending backup ===\n")
|
||||||
|
|
||||||
|
snapshot, err := c.CreateSnapshot()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.SendSnapshot(snapshot); err != nil {
|
||||||
|
fmt.Printf("Error sending snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\n✓ Backup completed successfully!")
|
||||||
|
|
||||||
|
case "backup-full":
|
||||||
|
// Force full backup (no incremental)
|
||||||
|
fmt.Println("=== Creating full backup ===\n")
|
||||||
|
|
||||||
|
snapshot, err := c.CreateSnapshot()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.SendIncremental(snapshot, ""); err != nil {
|
||||||
|
fmt.Printf("Error sending snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create bookmark for future incremental backups
|
||||||
|
if err := c.CreateBookmark(snapshot); err != nil {
|
||||||
|
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\n✓ Full backup completed successfully!")
|
||||||
|
|
||||||
|
case "backup-incremental":
|
||||||
|
// Incremental backup from last bookmark
|
||||||
|
fmt.Println("=== Creating incremental backup ===\n")
|
||||||
|
|
||||||
|
// Check for existing bookmark
|
||||||
|
lastBookmark, err := c.GetLastBookmark()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error checking bookmarks: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if lastBookmark == "" {
|
||||||
|
fmt.Println("No existing bookmark found. Use 'backup-full' for initial backup.")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot, err := c.CreateSnapshot()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
|
||||||
|
fmt.Printf("Error sending incremental snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create bookmark for future incremental backups
|
||||||
|
if err := c.CreateBookmark(snapshot); err != nil {
|
||||||
|
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\n✓ Incremental backup completed successfully!")
|
||||||
|
|
||||||
|
case "snapshot":
|
||||||
|
// Create typed snapshots (hourly, daily, weekly, monthly)
|
||||||
|
if len(os.Args) < 3 {
|
||||||
|
fmt.Println("Usage: zfs-client snapshot <hourly|daily|weekly|monthly>")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapType := client.SnapshotType(os.Args[2])
|
||||||
|
switch snapType {
|
||||||
|
case client.SnapshotHourly, client.SnapshotDaily, client.SnapshotWeekly, client.SnapshotMonthly:
|
||||||
|
// Valid type
|
||||||
|
default:
|
||||||
|
fmt.Printf("Invalid snapshot type: %s\n", snapType)
|
||||||
|
fmt.Println("Valid types: hourly, daily, weekly, monthly")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("=== Creating %s snapshot ===\n\n", snapType)
|
||||||
|
|
||||||
|
snapshot, err := c.CreateSnapshotWithType(snapType)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for existing bookmark for incremental
|
||||||
|
lastBookmark, _ := c.GetLastBookmark()
|
||||||
|
|
||||||
|
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
|
||||||
|
fmt.Printf("Error sending snapshot: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create bookmark
|
||||||
|
if err := c.CreateBookmark(snapshot); err != nil {
|
||||||
|
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rotate local snapshots using server policy if available
|
||||||
|
policy, err := getRotationPolicy(c)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
|
||||||
|
policy = client.DefaultPolicy()
|
||||||
|
}
|
||||||
|
if err := c.RotateLocalSnapshots(policy); err != nil {
|
||||||
|
fmt.Printf("Warning: failed to rotate snapshots: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\n✓ %s snapshot completed successfully!\n", snapType)
|
||||||
|
|
||||||
|
case "rotate":
|
||||||
|
// Rotate local snapshots using server policy if available
|
||||||
|
fmt.Println("=== Rotating local snapshots ===\n")
|
||||||
|
|
||||||
|
policy, err := getRotationPolicy(c)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
|
||||||
|
policy = client.DefaultPolicy()
|
||||||
|
}
|
||||||
|
if err := c.RotateLocalSnapshots(policy); err != nil {
|
||||||
|
fmt.Printf("Error rotating snapshots: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\n✓ Rotation completed!")
|
||||||
|
|
||||||
|
case "rotate-remote":
|
||||||
|
// Request server to rotate remote snapshots
|
||||||
|
if err := c.RequestRotation(); err != nil {
|
||||||
|
fmt.Printf("Error requesting rotation: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "status":
|
||||||
|
if err := c.GetStatus(); err != nil {
|
||||||
|
fmt.Printf("Error getting status: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "bookmarks":
|
||||||
|
// List bookmarks
|
||||||
|
fmt.Println("=== ZFS Bookmarks ===\n")
|
||||||
|
|
||||||
|
bookmark, err := c.GetLastBookmark()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if bookmark == "" {
|
||||||
|
fmt.Println("No bookmarks found")
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Last bookmark: %s\n", bookmark)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "help", "-h", "--help":
|
||||||
|
printUsage()
|
||||||
|
|
||||||
|
default:
|
||||||
|
fmt.Printf("Unknown command: %s\n", command)
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getRotationPolicy fetches the rotation policy from the server.
|
||||||
|
// If the server has a policy configured, it must be used.
|
||||||
|
// Otherwise, the default policy is returned.
|
||||||
|
func getRotationPolicy(c *client.Client) (*client.SnapshotPolicy, error) {
|
||||||
|
serverPolicy, err := c.GetRotationPolicy()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if serverPolicy.ServerManaged && serverPolicy.RotationPolicy != nil {
|
||||||
|
fmt.Println(" Using server-managed rotation policy")
|
||||||
|
return serverPolicy.RotationPolicy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// No server policy, use default
|
||||||
|
fmt.Println(" Using default rotation policy")
|
||||||
|
return client.DefaultPolicy(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// printUsage writes the client's help text to stdout, one line per entry.
func printUsage() {
	usage := []string{
		"ZFS Snapshot Backup Client",
		"\nUsage: zfs-client [command]",
		"\nCommands:",
		"  backup             - Create snapshot and send (auto incremental if bookmark exists)",
		"  backup-full        - Create full backup (no incremental)",
		"  backup-incremental - Create incremental backup from last bookmark",
		"  snapshot <type>    - Create typed snapshot (hourly|daily|weekly|monthly)",
		"  rotate             - Rotate local snapshots based on retention policy",
		"  rotate-remote      - Request server to rotate old remote snapshots",
		"  status             - Check server status and quota",
		"  bookmarks          - List ZFS bookmarks",
		"  help               - Show this help message",
		"\nSnapshot Retention Policy (default):",
		"  Hourly:  24 snapshots",
		"  Daily:   7 snapshots",
		"  Weekly:  4 snapshots",
		"  Monthly: 12 snapshots",
		"\nEnvironment Variables (can be set in .env file):",
		"  CLIENT_ID     - Client identifier (default: client1)",
		"  API_KEY       - API key for authentication (default: secret123)",
		"  SERVER_URL    - Backup server URL (default: http://localhost:8080)",
		"  LOCAL_DATASET - ZFS dataset to backup (default: tank/data)",
		"  COMPRESS      - Enable gzip compression (default: true)",
		"  STORAGE_TYPE  - Storage type: s3 or local (default: s3)",
		"\nExamples:",
		"  zfs-client backup",
		"  zfs-client backup-full",
		"  zfs-client snapshot hourly",
		"  zfs-client rotate",
		"  CLIENT_ID=myclient zfs-client backup",
	}
	for _, line := range usage {
		fmt.Println(line)
	}
}
|
||||||
178
cmd/zfs-restore/main.go
Normal file
178
cmd/zfs-restore/main.go
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
// Command zfs-restore is a CLI tool for restoring ZFS snapshots from a backup server.
|
||||||
|
// It provides commands for listing, restoring, and mounting snapshots.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"git.ma-al.com/goc_marek/zfs/internal/restore"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) < 2 {
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration from environment and .env file
|
||||||
|
cfg := restore.LoadConfig()
|
||||||
|
client := restore.New(cfg.ClientID, cfg.APIKey, cfg.ServerURL)
|
||||||
|
|
||||||
|
command := os.Args[1]
|
||||||
|
|
||||||
|
switch command {
|
||||||
|
case "list":
|
||||||
|
snapshots, err := client.ListSnapshots()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
client.DisplaySnapshots(snapshots)
|
||||||
|
|
||||||
|
case "restore":
|
||||||
|
if len(os.Args) < 4 {
|
||||||
|
fmt.Println("Usage: zfs-restore restore <snapshot-number> <target-dataset> [--force]")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots, err := client.ListSnapshots()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by timestamp (newest first)
|
||||||
|
sort.Slice(snapshots, func(i, j int) bool {
|
||||||
|
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Parse snapshot number
|
||||||
|
var snapNum int
|
||||||
|
fmt.Sscanf(os.Args[2], "%d", &snapNum)
|
||||||
|
|
||||||
|
if snapNum < 1 || snapNum > len(snapshots) {
|
||||||
|
fmt.Printf("Invalid snapshot number. Use 'list' to see available snapshots.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot := snapshots[snapNum-1]
|
||||||
|
targetDataset := os.Args[3]
|
||||||
|
force := len(os.Args) > 4 && os.Args[4] == "--force"
|
||||||
|
|
||||||
|
if err := client.RestoreSnapshot(snapshot, targetDataset, force); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "save":
|
||||||
|
if len(os.Args) < 4 {
|
||||||
|
fmt.Println("Usage: zfs-restore save <snapshot-number> <output-file>")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots, err := client.ListSnapshots()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(snapshots, func(i, j int) bool {
|
||||||
|
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
|
||||||
|
})
|
||||||
|
|
||||||
|
var snapNum int
|
||||||
|
fmt.Sscanf(os.Args[2], "%d", &snapNum)
|
||||||
|
|
||||||
|
if snapNum < 1 || snapNum > len(snapshots) {
|
||||||
|
fmt.Printf("Invalid snapshot number.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot := snapshots[snapNum-1]
|
||||||
|
outputFile := os.Args[3]
|
||||||
|
|
||||||
|
if err := client.RestoreToFile(snapshot, outputFile); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "mount":
|
||||||
|
if len(os.Args) < 4 {
|
||||||
|
fmt.Println("Usage: zfs-restore mount <dataset> <mountpoint>")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
dataset := os.Args[2]
|
||||||
|
mountpoint := os.Args[3]
|
||||||
|
|
||||||
|
if err := client.MountSnapshot(dataset, mountpoint); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "latest":
|
||||||
|
if len(os.Args) < 3 {
|
||||||
|
fmt.Println("Usage: zfs-restore latest <target-dataset> [--force]")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots, err := client.ListSnapshots()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(snapshots) == 0 {
|
||||||
|
fmt.Println("No snapshots available")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort and get latest
|
||||||
|
sort.Slice(snapshots, func(i, j int) bool {
|
||||||
|
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
|
||||||
|
})
|
||||||
|
|
||||||
|
latest := snapshots[0]
|
||||||
|
targetDataset := os.Args[2]
|
||||||
|
force := len(os.Args) > 3 && os.Args[3] == "--force"
|
||||||
|
|
||||||
|
fmt.Printf("Restoring latest snapshot from %s\n", latest.Timestamp.Format("2006-01-02 15:04:05"))
|
||||||
|
|
||||||
|
if err := client.RestoreSnapshot(latest, targetDataset, force); err != nil {
|
||||||
|
fmt.Printf("Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "help", "-h", "--help":
|
||||||
|
printUsage()
|
||||||
|
|
||||||
|
default:
|
||||||
|
fmt.Printf("Unknown command: %s\n", command)
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// printUsage writes the restore tool's help text to stdout, one line per entry.
func printUsage() {
	usage := []string{
		"ZFS Snapshot Restore Tool",
		"\nUsage: zfs-restore [command] [options]",
		"\nCommands:",
		"  list                            - List available snapshots",
		"  restore <#> <dataset> [--force] - Restore snapshot to ZFS dataset",
		"  latest <dataset> [--force]      - Restore most recent snapshot",
		"  save <#> <file>                 - Save snapshot to file",
		"  mount <dataset> <mountpoint>    - Mount restored dataset",
		"  help                            - Show this help message",
		"\nExamples:",
		"  zfs-restore list",
		"  zfs-restore restore 1 tank/restored",
		"  zfs-restore latest tank/restored --force",
		"  zfs-restore save 2 backup.zfs.gz",
		"  zfs-restore mount tank/restored /mnt/restore",
		"\nEnvironment Variables (can be set in .env file):",
		"  CLIENT_ID     - Client identifier (default: client1)",
		"  API_KEY       - API key for authentication (default: secret123)",
		"  SERVER_URL    - Backup server URL (default: http://localhost:8080)",
	}
	for _, line := range usage {
		fmt.Println(line)
	}
}
|
||||||
69
cmd/zfs-server/main.go
Normal file
69
cmd/zfs-server/main.go
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
// Command zfs-server is the main entry point for the ZFS snapshot backup server.
|
||||||
|
// It initializes the server with S3 or local storage and starts the HTTP API.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
	"log"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"git.ma-al.com/goc_marek/zfs/internal/server"
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Load configuration from .env file and environment
|
||||||
|
cfg := server.LoadConfig()
|
||||||
|
|
||||||
|
// Initialize backends
|
||||||
|
var s3Backend *server.S3Backend
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if cfg.S3Enabled {
|
||||||
|
s3Backend, err = server.NewS3Backend(cfg.S3Endpoint, cfg.S3AccessKey, cfg.S3SecretKey, cfg.S3BucketName, cfg.S3UseSSL)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to initialize S3 backend: %v", err)
|
||||||
|
}
|
||||||
|
log.Printf("Initialized S3 backend: %s/%s", cfg.S3Endpoint, cfg.S3BucketName)
|
||||||
|
} else {
|
||||||
|
log.Println("S3 backend disabled, using local ZFS storage only")
|
||||||
|
}
|
||||||
|
|
||||||
|
localBackend := server.NewLocalBackend(cfg.BaseDataset)
|
||||||
|
|
||||||
|
// Create metadata directory if needed
|
||||||
|
dir := cfg.MetadataFile
|
||||||
|
if idx := len(dir) - 1; idx > 0 {
|
||||||
|
for i := len(dir) - 1; i >= 0; i-- {
|
||||||
|
if dir[i] == '/' {
|
||||||
|
dir = dir[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
os.MkdirAll(dir, 0755)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := server.New(cfg.ConfigFile, cfg.MetadataFile, s3Backend, localBackend)
|
||||||
|
|
||||||
|
// Register HTTP routes
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
srv.RegisterRoutes(mux)
|
||||||
|
|
||||||
|
// Create HTTP server with timeouts for security and reliability
|
||||||
|
httpServer := &http.Server{
|
||||||
|
Addr: ":" + cfg.Port,
|
||||||
|
Handler: mux,
|
||||||
|
ReadTimeout: 30 * time.Second, // Prevent slowloris attacks
|
||||||
|
WriteTimeout: 30 * time.Minute, // Allow large file uploads
|
||||||
|
IdleTimeout: 120 * time.Second, // Close idle connections
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("ZFS Snapshot Server starting on port %s", cfg.Port)
|
||||||
|
log.Printf("Config file: %s", cfg.ConfigFile)
|
||||||
|
log.Printf("Metadata file: %s", cfg.MetadataFile)
|
||||||
|
log.Printf("S3 enabled: %v", cfg.S3Enabled)
|
||||||
|
|
||||||
|
if err := httpServer.ListenAndServe(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
30
go.mod
Normal file
30
go.mod
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
module git.ma-al.com/goc_marek/zfs
|
||||||
|
|
||||||
|
go 1.25.6
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/minio/minio-go/v7 v7.0.98
|
||||||
|
github.com/mistifyio/go-zfs v2.1.1+incompatible
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
|
github.com/go-ini/ini v1.67.0 // indirect
|
||||||
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
|
github.com/klauspost/compress v1.18.2 // indirect
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.11 // indirect
|
||||||
|
github.com/klauspost/crc32 v1.3.0 // indirect
|
||||||
|
github.com/minio/crc64nvme v1.1.1 // indirect
|
||||||
|
github.com/minio/md5-simd v1.1.2 // indirect
|
||||||
|
github.com/philhofer/fwd v1.2.0 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
|
github.com/rs/xid v1.6.0 // indirect
|
||||||
|
github.com/tinylib/msgp v1.6.1 // indirect
|
||||||
|
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||||
|
golang.org/x/crypto v0.46.0 // indirect
|
||||||
|
golang.org/x/net v0.48.0 // indirect
|
||||||
|
golang.org/x/sys v0.39.0 // indirect
|
||||||
|
golang.org/x/text v0.32.0 // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
|
)
|
||||||
47
go.sum
Normal file
47
go.sum
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
|
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||||
|
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||||
|
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||||
|
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
|
||||||
|
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
|
||||||
|
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
|
||||||
|
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||||
|
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||||
|
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=
|
||||||
|
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
|
||||||
|
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||||
|
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
|
||||||
|
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||||
|
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||||
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
|
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
|
||||||
|
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
|
||||||
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
|
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||||
|
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||||
|
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||||
|
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||||
|
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||||
|
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
|
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||||
|
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
340
internal/client/client.go
Normal file
340
internal/client/client.go
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
// Package client provides ZFS snapshot backup client functionality.
|
||||||
|
// It handles creating snapshots and uploading them to a remote server.
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/mistifyio/go-zfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client handles snapshot backup operations to a remote server.
// It manages creating local ZFS snapshots and transmitting them
// to the backup server via HTTP or SSH.
type Client struct {
	// config holds the client settings (dataset, server URL, credentials,
	// compression flag) loaded from the environment / .env file.
	config *Config
}
|
||||||
|
|
||||||
|
// New creates a new Client instance with the provided configuration.
|
||||||
|
func New(config *Config) *Client {
|
||||||
|
return &Client{config: config}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSnapshot creates a local ZFS snapshot of the configured dataset.
|
||||||
|
// The snapshot is named with a timestamp for easy identification.
|
||||||
|
// Returns the created snapshot dataset or an error.
|
||||||
|
func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
|
||||||
|
// Get the local dataset
|
||||||
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get dataset: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate snapshot name with timestamp
|
||||||
|
timestamp := time.Now().Format("2006-01-02_15:04:05")
|
||||||
|
snapshotName := fmt.Sprintf("backup_%s", timestamp)
|
||||||
|
|
||||||
|
// Create the snapshot
|
||||||
|
snapshot, err := ds.Snapshot(snapshotName, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Created local snapshot: %s@%s\n", c.config.LocalDataset, snapshotName)
|
||||||
|
return snapshot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSnapshotSize returns the used size of a snapshot in bytes.
|
||||||
|
func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
|
||||||
|
return int64(snapshot.Used)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendSnapshot sends a snapshot to the backup server.
|
||||||
|
// It first requests upload authorization, then streams the snapshot
|
||||||
|
// using the appropriate method (S3 or ZFS receive).
|
||||||
|
func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
|
||||||
|
estimatedSize := c.GetSnapshotSize(snapshot)
|
||||||
|
|
||||||
|
// Request upload authorization from server
|
||||||
|
uploadReq := map[string]interface{}{
|
||||||
|
"client_id": c.config.ClientID,
|
||||||
|
"api_key": c.config.APIKey,
|
||||||
|
"dataset_name": c.config.LocalDataset,
|
||||||
|
"timestamp": time.Now().Format(time.RFC3339),
|
||||||
|
"compressed": c.config.Compress,
|
||||||
|
"estimated_size": estimatedSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
reqBody, _ := json.Marshal(uploadReq)
|
||||||
|
resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to request upload: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Parse server response
|
||||||
|
var uploadResp struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
UploadURL string `json:"upload_url"`
|
||||||
|
UploadMethod string `json:"upload_method"`
|
||||||
|
StorageKey string `json:"storage_key"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !uploadResp.Success {
|
||||||
|
return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("→ Upload authorized\n")
|
||||||
|
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
|
||||||
|
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)
|
||||||
|
|
||||||
|
// Choose upload method based on server response
|
||||||
|
if uploadResp.UploadMethod == "s3" {
|
||||||
|
return c.streamToS3(snapshot, uploadResp.UploadURL, uploadResp.StorageKey)
|
||||||
|
}
|
||||||
|
return c.sendViaZFS(snapshot, uploadResp.StorageKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// streamToS3 streams a ZFS snapshot to S3 storage via HTTP.
|
||||||
|
// The snapshot is optionally compressed with gzip before transmission.
|
||||||
|
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
|
||||||
|
fmt.Printf("→ Streaming snapshot to S3...\n")
|
||||||
|
|
||||||
|
// Create ZFS send command
|
||||||
|
cmd := exec.Command("zfs", "send", snapshot.Name)
|
||||||
|
zfsOut, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create pipe: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return fmt.Errorf("failed to start zfs send: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var reader io.Reader = zfsOut
|
||||||
|
|
||||||
|
// Apply gzip compression if enabled
|
||||||
|
if c.config.Compress {
|
||||||
|
fmt.Printf(" Compressing with gzip...\n")
|
||||||
|
pr, pw := io.Pipe()
|
||||||
|
gzWriter := gzip.NewWriter(pw)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer pw.Close()
|
||||||
|
defer gzWriter.Close()
|
||||||
|
io.Copy(gzWriter, zfsOut)
|
||||||
|
}()
|
||||||
|
|
||||||
|
reader = pr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create HTTP request
|
||||||
|
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set required headers
|
||||||
|
req.Header.Set("X-API-Key", c.config.APIKey)
|
||||||
|
req.Header.Set("X-Storage-Key", storageKey)
|
||||||
|
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
|
||||||
|
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
|
||||||
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
|
|
||||||
|
// Send request with no timeout for large uploads
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
httpResp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
cmd.Process.Kill()
|
||||||
|
return fmt.Errorf("failed to upload: %v", err)
|
||||||
|
}
|
||||||
|
defer httpResp.Body.Close()
|
||||||
|
|
||||||
|
if httpResp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(httpResp.Body)
|
||||||
|
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Wait(); err != nil {
|
||||||
|
return fmt.Errorf("zfs send failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse response
|
||||||
|
var result struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !result.Success {
|
||||||
|
return fmt.Errorf("upload failed: %s", result.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Snapshot uploaded successfully!\n")
|
||||||
|
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendViaZFS sends a snapshot via traditional ZFS send/receive over SSH.
|
||||||
|
// This method is used when the server uses local ZFS storage.
|
||||||
|
func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
|
||||||
|
fmt.Printf("→ Sending via ZFS send/receive...\n")
|
||||||
|
|
||||||
|
// Extract server host from URL
|
||||||
|
serverHost := c.config.ServerURL
|
||||||
|
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
|
||||||
|
serverHost = serverHost[7:]
|
||||||
|
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
|
||||||
|
serverHost = serverHost[8:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove port if present
|
||||||
|
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
|
||||||
|
serverHost = serverHost[:idx]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute ZFS send over SSH
|
||||||
|
cmd := exec.Command("sh", "-c",
|
||||||
|
fmt.Sprintf("zfs send %s | ssh %s 'zfs recv -F %s'",
|
||||||
|
snapshot.Name, serverHost, receivePath))
|
||||||
|
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return fmt.Errorf("failed to send snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Snapshot sent successfully!\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStatus retrieves and displays the client's backup status from the server.
|
||||||
|
// Shows storage usage, quota, and snapshot count.
|
||||||
|
func (c *Client) GetStatus() error {
|
||||||
|
url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
|
||||||
|
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
|
||||||
|
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get status: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var status struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
TotalSnapshots int `json:"total_snapshots"`
|
||||||
|
UsedBytes int64 `json:"used_bytes"`
|
||||||
|
MaxBytes int64 `json:"max_bytes"`
|
||||||
|
PercentUsed float64 `json:"percent_used"`
|
||||||
|
StorageType string `json:"storage_type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode status: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !status.Success {
|
||||||
|
return fmt.Errorf("status check failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\n=== Server Status ===\n")
|
||||||
|
fmt.Printf("Storage Type: %s\n", status.StorageType)
|
||||||
|
fmt.Printf("Total Snapshots: %d\n", status.TotalSnapshots)
|
||||||
|
fmt.Printf("Used: %.2f GB / %.2f GB (%.1f%%)\n",
|
||||||
|
float64(status.UsedBytes)/(1024*1024*1024),
|
||||||
|
float64(status.MaxBytes)/(1024*1024*1024),
|
||||||
|
status.PercentUsed)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestRotation asks the server to rotate old snapshots.
|
||||||
|
// This deletes the oldest snapshots to free up space.
|
||||||
|
func (c *Client) RequestRotation() error {
|
||||||
|
reqBody, _ := json.Marshal(map[string]string{
|
||||||
|
"client_id": c.config.ClientID,
|
||||||
|
"api_key": c.config.APIKey,
|
||||||
|
})
|
||||||
|
|
||||||
|
resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to request rotation: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var rotateResp struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
DeletedCount int `json:"deleted_count"`
|
||||||
|
ReclaimedBytes int64 `json:"reclaimed_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
|
||||||
|
return fmt.Errorf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rotateResp.Success {
|
||||||
|
return fmt.Errorf("rotation failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Rotation complete\n")
|
||||||
|
fmt.Printf(" Deleted: %d snapshots\n", rotateResp.DeletedCount)
|
||||||
|
fmt.Printf(" Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerRotationPolicy is the server's response to the /rotation-policy
// endpoint. When ServerManaged is true the embedded RotationPolicy is
// mandatory for this client. RotationPolicy may be nil when the server has
// no policy configured.
type ServerRotationPolicy struct {
	// Success reports whether the request was accepted.
	Success bool `json:"success"`
	// Message carries the server's error text when Success is false.
	Message string `json:"message"`
	// RotationPolicy is the retention policy to apply (nil if none).
	RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
	// ServerManaged marks the policy as server-imposed and mandatory.
	ServerManaged bool `json:"server_managed"`
}
|
||||||
|
|
||||||
|
// GetRotationPolicy fetches the rotation policy from the server.
|
||||||
|
// If the server has a policy configured for this client, it must be used.
|
||||||
|
// Returns the policy and whether it's server-managed (mandatory).
|
||||||
|
func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
|
||||||
|
url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
|
||||||
|
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
|
||||||
|
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get rotation policy: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var policyResp ServerRotationPolicy
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !policyResp.Success {
|
||||||
|
return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &policyResp, nil
|
||||||
|
}
|
||||||
89
internal/client/config.go
Normal file
89
internal/client/config.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
// Package client provides ZFS snapshot backup client functionality.
|
||||||
|
// It handles creating snapshots and uploading them to a remote server.
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds client-side configuration for connecting to the backup
// server. Values are populated by LoadConfig from environment variables,
// optionally seeded from a .env file.
type Config struct {
	// ClientID is the unique identifier for this client
	ClientID string `json:"client_id"`
	// APIKey is the authentication key for the server
	APIKey string `json:"api_key"`
	// ServerURL is the base URL of the backup server (e.g. http://host:8080)
	ServerURL string `json:"server_url"`
	// LocalDataset is the ZFS dataset to back up (e.g. tank/data)
	LocalDataset string `json:"local_dataset"`
	// Compress enables gzip compression for transfers
	Compress bool `json:"compress"`
	// StorageType specifies the storage backend ("s3" or "local")
	StorageType string `json:"storage_type"`
}
|
||||||
|
|
||||||
|
// LoadConfig loads client configuration from environment variables and .env file.
|
||||||
|
// Environment variables take precedence over .env file values.
|
||||||
|
func LoadConfig() *Config {
|
||||||
|
// Load .env file if exists
|
||||||
|
loadEnvFile(".env")
|
||||||
|
|
||||||
|
return &Config{
|
||||||
|
ClientID: getEnv("CLIENT_ID", "client1"),
|
||||||
|
APIKey: getEnv("API_KEY", "secret123"),
|
||||||
|
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
|
||||||
|
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
|
||||||
|
Compress: getEnv("COMPRESS", "true") == "true",
|
||||||
|
StorageType: getEnv("STORAGE_TYPE", "s3"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadEnvFile loads KEY=VALUE pairs from a .env-style file into the process
// environment. Blank lines and lines starting with # are skipped. Values
// may be wrapped in single or double quotes. Variables already present in
// the environment are never overwritten, so the real environment takes
// precedence.
//
// The file is optional; all errors are ignored (best-effort loading).
func loadEnvFile(filename string) {
	file, err := os.Open(filename)
	if err != nil {
		return // .env file is optional
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		// Skip empty lines and comments.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Parse KEY=VALUE; lines without '=' are ignored.
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}

		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])

		// Strip surrounding quotes only when they actually match, so a
		// malformed value like `"abc` or `"abc'` is left untouched
		// (the old code stripped the last character unconditionally).
		if len(value) >= 2 {
			first, last := value[0], value[len(value)-1]
			if (first == '"' || first == '\'') && first == last {
				value = value[1 : len(value)-1]
			}
		}

		// Only set if not already defined in the environment.
		if os.Getenv(key) == "" {
			os.Setenv(key, value)
		}
	}
	// Scanner errors are deliberately ignored: .env loading is best-effort.
}
|
||||||
|
|
||||||
|
// getEnv returns the value of the environment variable key, falling back
// to defaultValue when the variable is unset or empty.
func getEnv(key, defaultValue string) string {
	v := os.Getenv(key)
	if v == "" {
		return defaultValue
	}
	return v
}
|
||||||
377
internal/client/snapshot.go
Normal file
377
internal/client/snapshot.go
Normal file
@@ -0,0 +1,377 @@
|
|||||||
|
// Package client provides ZFS snapshot backup client functionality.
|
||||||
|
// This file contains snapshot management functions including creation,
|
||||||
|
// bookmarking, and rotation similar to zfs-auto-snapshot.
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os/exec"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/mistifyio/go-zfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SnapshotPolicy defines retention settings for automatic snapshots,
// expressed as how many snapshots of each period to keep.
type SnapshotPolicy struct {
	// KeepHourly is the number of hourly snapshots to keep
	KeepHourly int
	// KeepDaily is the number of daily snapshots to keep
	KeepDaily int
	// KeepWeekly is the number of weekly snapshots to keep
	KeepWeekly int
	// KeepMonthly is the number of monthly snapshots to keep
	KeepMonthly int
}

// DefaultPolicy returns the default retention policy: 24 hourly, 7 daily,
// 4 weekly, and 12 monthly snapshots.
func DefaultPolicy() *SnapshotPolicy {
	p := new(SnapshotPolicy)
	p.KeepHourly = 24
	p.KeepDaily = 7
	p.KeepWeekly = 4
	p.KeepMonthly = 12
	return p
}
|
||||||
|
|
||||||
|
// SnapshotType labels the cadence of a snapshot (hourly, daily, weekly,
// monthly) or marks it as a manual one. The label is embedded in snapshot
// names and drives retention grouping during rotation.
type SnapshotType string

// Snapshot type labels used in snapshot names and rotation grouping.
const (
	SnapshotHourly  SnapshotType = "hourly"
	SnapshotDaily   SnapshotType = "daily"
	SnapshotWeekly  SnapshotType = "weekly"
	SnapshotMonthly SnapshotType = "monthly"
	// SnapshotManual marks on-demand snapshots; rotation never deletes them.
	SnapshotManual SnapshotType = "manual"
)
|
||||||
|
|
||||||
|
// CreateSnapshotWithType creates a snapshot with a specific type label.
|
||||||
|
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
|
||||||
|
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
|
||||||
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get dataset: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := time.Now().Format("2006-01-02_15-04-05")
|
||||||
|
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
|
||||||
|
|
||||||
|
snapshot, err := ds.Snapshot(snapshotName, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
|
||||||
|
return snapshot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBookmark creates a ZFS bookmark from a snapshot.
|
||||||
|
// Bookmarks allow incremental sends even after the source snapshot is deleted.
|
||||||
|
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
|
||||||
|
// Extract snapshot name from full path (dataset@snapshot -> snapshot)
|
||||||
|
parts := strings.Split(snapshot.Name, "@")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return fmt.Errorf("invalid snapshot name format: %s", snapshot.Name)
|
||||||
|
}
|
||||||
|
snapshotName := parts[1]
|
||||||
|
bookmarkName := fmt.Sprintf("%s#%s", c.config.LocalDataset, snapshotName)
|
||||||
|
|
||||||
|
// Create bookmark using zfs command
|
||||||
|
cmd := exec.Command("zfs", "bookmark", snapshot.Name, bookmarkName)
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create bookmark: %v: %s", err, string(output))
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Created bookmark: %s\n", bookmarkName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLastBookmark returns the name of the most recent bookmark for the
// configured dataset, or "" when there are none. Bookmarks are used as the
// base for incremental sends.
//
// NOTE(review): "most recent" relies on `zfs list` output order plus the
// timestamped naming scheme sorting chronologically; with -r, bookmarks of
// child datasets may interleave — confirm this is acceptable. A failing
// `zfs list` is treated as "no bookmarks" rather than an error.
func (c *Client) GetLastBookmark() (string, error) {
	// List all bookmark names for the dataset, one per line, no header.
	cmd := exec.Command("zfs", "list", "-t", "bookmark", "-o", "name", "-H", "-r", c.config.LocalDataset)
	output, err := cmd.Output()
	if err != nil {
		return "", nil // No bookmarks yet
	}

	bookmarks := strings.Split(strings.TrimSpace(string(output)), "\n")
	if len(bookmarks) == 0 || bookmarks[0] == "" {
		return "", nil
	}

	// Return the last bookmark (most recent)
	return bookmarks[len(bookmarks)-1], nil
}
|
||||||
|
|
||||||
|
// GetLastSnapshot returns the most recent snapshot for the dataset.
|
||||||
|
func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
|
||||||
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get dataset: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots, err := ds.Snapshots()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to list snapshots: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(snapshots) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the last snapshot (most recent)
|
||||||
|
return snapshots[len(snapshots)-1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendIncremental uploads the given snapshot to the server: an incremental
// stream relative to base (a bookmark or snapshot name) when base is
// non-empty, otherwise a full stream. It first requests upload
// authorization from the server's /upload endpoint, then streams the data
// with streamIncrementalToS3.
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
	// Ask the helper for an estimated stream size to report to the server.
	estimatedSize := c.GetSnapshotSize(snapshot)

	// Determine if this is incremental or full (label used for display).
	isIncremental := base != ""
	var uploadMethod string
	if isIncremental {
		uploadMethod = "incremental"
	} else {
		uploadMethod = "full"
	}

	// Request upload authorization from server.
	uploadReq := map[string]interface{}{
		"client_id":      c.config.ClientID,
		"api_key":        c.config.APIKey,
		"dataset_name":   c.config.LocalDataset,
		"timestamp":      time.Now().Format(time.RFC3339),
		"compressed":     c.config.Compress,
		"estimated_size": estimatedSize,
		"incremental":    isIncremental,
		"base_snapshot":  base,
	}

	// Marshal error deliberately ignored: the map holds only basic types.
	reqBody, _ := json.Marshal(uploadReq)
	resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
	if err != nil {
		return fmt.Errorf("failed to request upload: %v", err)
	}
	defer resp.Body.Close()

	// Authorization response: where and how to upload the stream.
	var uploadResp struct {
		Success      bool   `json:"success"`
		Message      string `json:"message"`
		UploadURL    string `json:"upload_url"`
		UploadMethod string `json:"upload_method"`
		StorageKey   string `json:"storage_key"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
		return fmt.Errorf("failed to decode response: %v", err)
	}

	if !uploadResp.Success {
		return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
	}

	fmt.Printf("→ Upload authorized\n")
	fmt.Printf("  Method: %s\n", uploadResp.UploadMethod)
	fmt.Printf("  Type: %s\n", uploadMethod)
	fmt.Printf("  Storage key: %s\n", uploadResp.StorageKey)

	// Stream the snapshot; unlike the full-upload path, incremental
	// uploads always go to S3.
	return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}
|
||||||
|
|
||||||
|
// streamIncrementalToS3 streams a (possibly incremental) ZFS snapshot to S3
// via the server's HTTP upload endpoint. When base is non-empty a
// "zfs send -i base snapshot" incremental stream is sent; otherwise a full
// stream. The stream is gzip-compressed on the fly when configured.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
	fmt.Printf("→ Streaming snapshot to S3...\n")

	// Create ZFS send command.
	var cmd *exec.Cmd
	if base != "" {
		// Incremental send from bookmark or snapshot.
		fmt.Printf("  Base: %s\n", base)
		cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
	} else {
		// Full send.
		cmd = exec.Command("zfs", "send", snapshot.Name)
	}

	zfsOut, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to create pipe: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start zfs send: %v", err)
	}

	var reader io.Reader = zfsOut

	// Apply gzip compression if enabled.
	if c.config.Compress {
		fmt.Printf("  Compressing with gzip...\n")
		pr, pw := io.Pipe()
		gzWriter := gzip.NewWriter(pw)

		go func() {
			// Deferred closes run LIFO: gzip is flushed/closed first,
			// then the pipe writer, which ends the HTTP body cleanly.
			// NOTE(review): the io.Copy error is discarded, so a failure
			// mid-stream surfaces only as a truncated upload — consider
			// pw.CloseWithError to propagate it.
			defer pw.Close()
			defer gzWriter.Close()
			io.Copy(gzWriter, zfsOut)
		}()

		reader = pr
	}

	// Create HTTP request streaming the (possibly compressed) body.
	req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
	if err != nil {
		return fmt.Errorf("failed to create request: %v", err)
	}

	// Set required headers identifying the client and the payload.
	req.Header.Set("X-API-Key", c.config.APIKey)
	req.Header.Set("X-Storage-Key", storageKey)
	req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
	req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
	req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
	if base != "" {
		req.Header.Set("X-Base-Snapshot", base)
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	// Send request with no timeout for large uploads.
	client := &http.Client{
		Timeout: 0,
	}

	httpResp, err := client.Do(req)
	if err != nil {
		// NOTE(review): Kill without a subsequent Wait leaves a zombie
		// until process exit — confirm acceptable for this CLI.
		cmd.Process.Kill()
		return fmt.Errorf("failed to upload: %v", err)
	}
	defer httpResp.Body.Close()

	if httpResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(httpResp.Body)
		return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
	}

	// A non-zero zfs exit means the server received a truncated stream.
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("zfs send failed: %v", err)
	}

	// Parse the server's JSON confirmation.
	var result struct {
		Success bool   `json:"success"`
		Message string `json:"message"`
		Size    int64  `json:"size"`
	}

	if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
		return fmt.Errorf("failed to decode response: %v", err)
	}

	if !result.Success {
		return fmt.Errorf("upload failed: %s", result.Message)
	}

	fmt.Printf("✓ Snapshot uploaded successfully!\n")
	fmt.Printf("  Size: %.2f MB\n", float64(result.Size)/(1024*1024))

	return nil
}
|
||||||
|
|
||||||
|
// RotateLocalSnapshots removes old local snapshots according to the given
// retention policy, similar to zfs-auto-snapshot. Snapshots are grouped by
// type (hourly/daily/weekly/monthly/manual); within each group the oldest
// snapshots beyond the group's keep count are destroyed. Manual snapshots
// are always kept. A failure to destroy an individual snapshot is logged
// as a warning and does not abort the rotation.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
	ds, err := zfs.GetDataset(c.config.LocalDataset)
	if err != nil {
		return fmt.Errorf("failed to get dataset: %v", err)
	}

	snapshots, err := ds.Snapshots()
	if err != nil {
		return fmt.Errorf("failed to list snapshots: %v", err)
	}

	// Group snapshots by type.
	groups := make(map[SnapshotType][]*zfs.Dataset)
	for _, snap := range snapshots {
		snapType := parseSnapshotType(snap.Name)
		groups[snapType] = append(groups[snapType], snap)
	}

	// Apply retention policy. deletedCount tracks successful destroys only.
	deletedCount := 0
	// Per-type keep counts; a negative count means "never delete".
	keepCount := map[SnapshotType]int{
		SnapshotHourly:  policy.KeepHourly,
		SnapshotDaily:   policy.KeepDaily,
		SnapshotWeekly:  policy.KeepWeekly,
		SnapshotMonthly: policy.KeepMonthly,
		SnapshotManual:  -1, // Keep all manual snapshots
	}

	for snapType, snaps := range groups {
		maxKeep := keepCount[snapType]
		if maxKeep < 0 {
			continue // Keep all
		}

		// Sort by creation time (oldest first).
		sortSnapshotsByTime(snaps)

		// Delete the oldest snapshots exceeding the limit.
		if len(snaps) > maxKeep {
			toDelete := snaps[:len(snaps)-maxKeep]
			for _, snap := range toDelete {
				fmt.Printf("  Deleting old snapshot: %s\n", snap.Name)
				if err := snap.Destroy(zfs.DestroyDefault); err != nil {
					// Best-effort: warn and continue with the rest.
					fmt.Printf("  Warning: failed to delete %s: %v\n", snap.Name, err)
				} else {
					deletedCount++
				}
			}
		}
	}

	if deletedCount > 0 {
		fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
	}

	return nil
}
|
||||||
|
|
||||||
|
// parseSnapshotType extracts the snapshot type from the snapshot name.
|
||||||
|
func parseSnapshotType(name string) SnapshotType {
|
||||||
|
if strings.Contains(name, "hourly") {
|
||||||
|
return SnapshotHourly
|
||||||
|
}
|
||||||
|
if strings.Contains(name, "daily") {
|
||||||
|
return SnapshotDaily
|
||||||
|
}
|
||||||
|
if strings.Contains(name, "weekly") {
|
||||||
|
return SnapshotWeekly
|
||||||
|
}
|
||||||
|
if strings.Contains(name, "monthly") {
|
||||||
|
return SnapshotMonthly
|
||||||
|
}
|
||||||
|
return SnapshotManual
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortSnapshotsByTime sorts snapshots in place, oldest first. Ordering is
// lexicographic on the full snapshot name; because names embed a
// zero-padded timestamp (2006-01-02_15-04-05), name order matches creation
// order for snapshots of the same dataset and type.
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
	sort.Slice(snaps, func(i, j int) bool {
		// Extract timestamp from snapshot name for comparison.
		// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
		return snaps[i].Name < snaps[j].Name
	})
}
|
||||||
80
internal/restore/config.go
Normal file
80
internal/restore/config.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
// Package restore provides functionality for restoring ZFS snapshots from a backup server.
|
||||||
|
// It supports restoring to ZFS datasets, saving to files, and mounting restored datasets.
|
||||||
|
package restore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds restore client configuration, populated from the
// environment (optionally seeded from a .env file) by LoadConfig.
type Config struct {
	// ClientID is the unique identifier for this client
	ClientID string
	// APIKey is the authentication key for the server
	APIKey string
	// ServerURL is the base URL of the backup server
	ServerURL string
}
|
||||||
|
|
||||||
|
// LoadConfig loads restore client configuration from environment variables and .env file.
|
||||||
|
// Environment variables take precedence over .env file values.
|
||||||
|
func LoadConfig() *Config {
|
||||||
|
// Load .env file if exists
|
||||||
|
loadEnvFile(".env")
|
||||||
|
|
||||||
|
return &Config{
|
||||||
|
ClientID: getEnv("CLIENT_ID", "client1"),
|
||||||
|
APIKey: getEnv("API_KEY", "secret123"),
|
||||||
|
ServerURL: getEnv("SERVER_URL", "http://localhost:8080"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadEnvFile loads KEY=VALUE pairs from a .env-style file into the process
// environment. Blank lines and lines starting with # are skipped. Values
// may be wrapped in single or double quotes. Variables already present in
// the environment are never overwritten, so the real environment takes
// precedence.
//
// The file is optional; all errors are ignored (best-effort loading).
func loadEnvFile(filename string) {
	file, err := os.Open(filename)
	if err != nil {
		return // .env file is optional
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		// Skip empty lines and comments.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Parse KEY=VALUE; lines without '=' are ignored.
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}

		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])

		// Strip surrounding quotes only when they actually match, so a
		// malformed value like `"abc` or `"abc'` is left untouched
		// (the old code stripped the last character unconditionally).
		if len(value) >= 2 {
			first, last := value[0], value[len(value)-1]
			if (first == '"' || first == '\'') && first == last {
				value = value[1 : len(value)-1]
			}
		}

		// Only set if not already defined in the environment.
		if os.Getenv(key) == "" {
			os.Setenv(key, value)
		}
	}
	// Scanner errors are deliberately ignored: .env loading is best-effort.
}
|
||||||
|
|
||||||
|
// getEnv returns the value of the environment variable key, falling back
// to defaultValue when the variable is unset or empty.
func getEnv(key, defaultValue string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return defaultValue
}
|
||||||
284
internal/restore/restore.go
Normal file
284
internal/restore/restore.go
Normal file
@@ -0,0 +1,284 @@
|
|||||||
|
// Package restore provides functionality for restoring ZFS snapshots from a backup server.
|
||||||
|
// It supports restoring to ZFS datasets, saving to files, and mounting restored datasets.
|
||||||
|
package restore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/mistifyio/go-zfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SnapshotMetadata describes one stored snapshot as reported by the server.
type SnapshotMetadata struct {
	// ClientID is the owning client's identifier.
	ClientID string `json:"client_id"`
	// SnapshotID uniquely identifies the snapshot on the server.
	SnapshotID string `json:"snapshot_id"`
	// Timestamp is when the snapshot was taken.
	Timestamp time.Time `json:"timestamp"`
	// SizeBytes is the stored size of the snapshot stream.
	SizeBytes int64 `json:"size_bytes"`
	// DatasetName is the source ZFS dataset.
	DatasetName string `json:"dataset_name"`
	// StorageKey locates the snapshot in backend storage.
	StorageKey string `json:"storage_key"`
	// StorageType names the backend holding the data (e.g. "s3").
	StorageType string `json:"storage_type"`
	// Compressed reports whether the stream was compressed on upload.
	Compressed bool `json:"compressed"`
	// Incremental reports whether this is an incremental stream.
	Incremental bool `json:"incremental"`
	// BaseSnapshot is the base of an incremental stream; empty for full.
	BaseSnapshot string `json:"base_snapshot,omitempty"`
}
|
||||||
|
|
||||||
|
// Client talks to the backup server to list and restore snapshots.
type Client struct {
	ClientID  string
	APIKey    string
	ServerURL string
}

// New returns a restore client configured with the given identity and
// server address.
func New(clientID, apiKey, serverURL string) *Client {
	c := Client{
		ClientID:  clientID,
		APIKey:    apiKey,
		ServerURL: serverURL,
	}
	return &c
}
|
||||||
|
|
||||||
|
// ListSnapshots retrieves all available snapshots from the server.
|
||||||
|
func (c *Client) ListSnapshots() ([]*SnapshotMetadata, error) {
|
||||||
|
url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
|
||||||
|
c.ServerURL, c.ClientID, c.APIKey)
|
||||||
|
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get snapshots: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var status struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Snapshots []*SnapshotMetadata `json:"snapshots"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !status.Success {
|
||||||
|
return nil, fmt.Errorf("failed to list snapshots")
|
||||||
|
}
|
||||||
|
|
||||||
|
return status.Snapshots, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisplaySnapshots prints a formatted table of available snapshots to stdout.
// Snapshots are sorted by timestamp (newest first); the sort reorders the
// caller's slice in place. Snapshots created within the last 24 hours get an
// extra "created ... ago" line.
func (c *Client) DisplaySnapshots(snapshots []*SnapshotMetadata) {
	if len(snapshots) == 0 {
		fmt.Println("No snapshots available")
		return
	}

	// Sort by timestamp (newest first)
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
	})

	fmt.Printf("\n=== Available Snapshots ===\n\n")
	fmt.Printf("%-4s %-25s %-20s %-12s %-10s %s\n",
		"#", "Timestamp", "Dataset", "Size", "Storage", "ID")
	fmt.Println(strings.Repeat("-", 100))

	for i, snap := range snapshots {
		age := time.Since(snap.Timestamp)
		sizeGB := float64(snap.SizeBytes) / (1024 * 1024 * 1024)
		compressed := ""
		if snap.Compressed {
			compressed = " (gz)"
		}

		// Long dataset names and IDs are truncated to keep columns aligned.
		fmt.Printf("%-4d %-25s %-20s %7.2f GB %-10s %s%s\n",
			i+1,
			snap.Timestamp.Format("2006-01-02 15:04:05"),
			truncate(snap.DatasetName, 20),
			sizeGB,
			snap.StorageType,
			truncate(snap.SnapshotID, 30),
			compressed)

		if age < 24*time.Hour {
			fmt.Printf("    (created %s ago)\n", formatDuration(age))
		}
	}

	fmt.Println()
}
|
||||||
|
|
||||||
|
// RestoreSnapshot downloads a snapshot from the server and restores it into
// a local ZFS dataset by piping the stream into `zfs recv`.
// If force is true, an existing target dataset is overwritten (`zfs recv -F`);
// otherwise the restore aborts when the target already exists.
//
// Requires the zfs CLI in PATH and sufficient privileges to receive into
// targetDataset. Progress messages are printed to stdout.
func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
	fmt.Printf("\n=== Restoring Snapshot ===\n")
	fmt.Printf("Source: %s\n", snapshot.SnapshotID)
	fmt.Printf("Target: %s\n", targetDataset)
	fmt.Printf("Size: %.2f GB\n", float64(snapshot.SizeBytes)/(1024*1024*1024))
	fmt.Printf("Storage: %s\n", snapshot.StorageType)
	fmt.Printf("Compressed: %v\n\n", snapshot.Compressed)

	// Check if target dataset exists
	if !force {
		if _, err := zfs.GetDataset(targetDataset); err == nil {
			return fmt.Errorf("target dataset %s already exists. Use --force to overwrite", targetDataset)
		}
	}

	// Request download from server
	downloadURL := fmt.Sprintf("%s/download?client_id=%s&api_key=%s&snapshot_id=%s",
		c.ServerURL, c.ClientID, c.APIKey, snapshot.SnapshotID)

	fmt.Printf("→ Downloading from server...\n")

	resp, err := http.Get(downloadURL)
	if err != nil {
		return fmt.Errorf("failed to download: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("download failed: %s", body)
	}

	// Create decompression reader if needed; the snapshot stream is piped
	// through gzip only when it was uploaded compressed.
	var reader io.Reader = resp.Body
	if snapshot.Compressed {
		fmt.Printf("  Decompressing...\n")
		gzReader, err := gzip.NewReader(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to create gzip reader: %v", err)
		}
		defer gzReader.Close()
		reader = gzReader
	}

	// Receive into ZFS
	fmt.Printf("→ Receiving into ZFS dataset...\n")

	var cmd *exec.Cmd
	if force {
		// -F lets zfs recv roll back/overwrite the target to match the stream.
		cmd = exec.Command("zfs", "recv", "-F", targetDataset)
	} else {
		cmd = exec.Command("zfs", "recv", targetDataset)
	}

	// Stream the (possibly decompressed) download straight into zfs recv;
	// zfs output goes to our own stdout/stderr for visibility.
	cmd.Stdin = reader
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("zfs recv failed: %v", err)
	}

	fmt.Printf("\n✓ Snapshot restored successfully!\n")
	fmt.Printf("  Dataset: %s\n", targetDataset)

	return nil
}
|
||||||
|
|
||||||
|
// RestoreToFile downloads a snapshot and saves the raw stream to a file
// instead of receiving it into ZFS. Useful for archiving or manual
// inspection. No decompression is performed here: if the snapshot was
// uploaded compressed, the written file is still gzip-compressed.
func (c *Client) RestoreToFile(snapshot *SnapshotMetadata, outputFile string) error {
	fmt.Printf("\n=== Saving Snapshot to File ===\n")
	fmt.Printf("Source: %s\n", snapshot.SnapshotID)
	fmt.Printf("Output: %s\n", outputFile)
	fmt.Printf("Size: %.2f GB\n\n", float64(snapshot.SizeBytes)/(1024*1024*1024))

	// Request download
	downloadURL := fmt.Sprintf("%s/download?client_id=%s&api_key=%s&snapshot_id=%s",
		c.ServerURL, c.ClientID, c.APIKey, snapshot.SnapshotID)

	fmt.Printf("→ Downloading...\n")

	resp, err := http.Get(downloadURL)
	if err != nil {
		return fmt.Errorf("failed to download: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("download failed: %s", body)
	}

	// Create output file (truncates an existing file at this path)
	outFile, err := os.Create(outputFile)
	if err != nil {
		return fmt.Errorf("failed to create file: %v", err)
	}
	defer outFile.Close()

	// Copy with progress
	written, err := io.Copy(outFile, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to save: %v", err)
	}

	fmt.Printf("\n✓ Snapshot saved successfully!\n")
	fmt.Printf("  Size: %.2f MB\n", float64(written)/(1024*1024))
	fmt.Printf("  File: %s\n", outputFile)

	return nil
}
|
||||||
|
|
||||||
|
// MountSnapshot mounts a restored dataset at the given mountpoint so the
// restored files can be browsed. The mountpoint directory is created if
// missing, the dataset's "mountpoint" ZFS property is updated, and
// `zfs mount` is invoked. Requires the zfs CLI and mount privileges.
func (c *Client) MountSnapshot(dataset, mountpoint string) error {
	ds, err := zfs.GetDataset(dataset)
	if err != nil {
		return fmt.Errorf("dataset not found: %v", err)
	}

	// Create mountpoint if it doesn't exist
	if err := os.MkdirAll(mountpoint, 0755); err != nil {
		return fmt.Errorf("failed to create mountpoint: %v", err)
	}

	// Set mountpoint property
	if err := ds.SetProperty("mountpoint", mountpoint); err != nil {
		return fmt.Errorf("failed to set mountpoint: %v", err)
	}

	// Mount the dataset (zfs output is discarded; only the exit status is checked)
	cmd := exec.Command("zfs", "mount", dataset)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to mount: %v", err)
	}

	fmt.Printf("✓ Mounted %s at %s\n", dataset, mountpoint)
	return nil
}
|
||||||
|
|
||||||
|
// Helper functions
|
||||||
|
|
||||||
|
// truncate shortens s to at most maxLen characters, replacing the tail with
// "..." when s does not fit.
//
// For maxLen <= 3 there is no room for the ellipsis, so the string is simply
// cut to maxLen bytes (the original implementation sliced with a negative
// index and panicked in that case). Note: slicing is byte-based, so a
// multi-byte UTF-8 rune at the cut point may be split.
func truncate(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen <= 3 {
		if maxLen < 0 {
			return ""
		}
		return s[:maxLen]
	}
	return s[:maxLen-3] + "..."
}
|
||||||
|
|
||||||
|
// formatDuration converts a duration to a human-readable string.
|
||||||
|
func formatDuration(d time.Duration) string {
|
||||||
|
if d < time.Minute {
|
||||||
|
return fmt.Sprintf("%.0f seconds", d.Seconds())
|
||||||
|
} else if d < time.Hour {
|
||||||
|
return fmt.Sprintf("%.0f minutes", d.Minutes())
|
||||||
|
} else if d < 24*time.Hour {
|
||||||
|
return fmt.Sprintf("%.1f hours", d.Hours())
|
||||||
|
} else {
|
||||||
|
return fmt.Sprintf("%.1f days", d.Hours()/24)
|
||||||
|
}
|
||||||
|
}
|
||||||
95
internal/server/config.go
Normal file
95
internal/server/config.go
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds application configuration from environment and .env file.
// Populated by LoadConfig; zero values are never used directly because every
// field has a default there.
type Config struct {
	S3Endpoint   string // S3-compatible endpoint, host[:port]
	S3AccessKey  string // S3 access key; empty disables S3
	S3SecretKey  string // S3 secret key; empty disables S3
	S3BucketName string // bucket snapshots are stored in
	S3UseSSL     bool   // whether to use TLS when talking to the endpoint
	S3Enabled    bool   // Enable/disable S3 backend
	BaseDataset  string // base ZFS dataset for locally stored snapshots
	ConfigFile   string // path to the clients JSON config file
	MetadataFile string // path to the snapshot metadata JSON file
	Port         string // HTTP listen port
}
|
||||||
|
|
||||||
|
// LoadConfig loads configuration from .env file and environment variables.
// Real environment variables take precedence over the .env file (loadEnvFile
// only fills unset variables), and every field falls back to a built-in
// default. S3 is force-disabled when either credential is missing, no matter
// what S3_ENABLED says.
func LoadConfig() *Config {
	// Load .env file if exists
	loadEnvFile(".env")

	s3Enabled := getEnv("S3_ENABLED", "true") == "true"
	s3AccessKey := os.Getenv("S3_ACCESS_KEY")
	s3SecretKey := os.Getenv("S3_SECRET_KEY")

	// Disable S3 if credentials are missing
	if s3AccessKey == "" || s3SecretKey == "" {
		s3Enabled = false
	}

	return &Config{
		S3Endpoint:   getEnv("S3_ENDPOINT", "s3.amazonaws.com"),
		S3AccessKey:  s3AccessKey,
		S3SecretKey:  s3SecretKey,
		S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
		// NOTE(review): SSL stays on for any value other than the literal
		// "false", while S3_ENABLED above requires the literal "true" —
		// confirm this asymmetry is intentional.
		S3UseSSL:     getEnv("S3_USE_SSL", "true") != "false",
		S3Enabled:    s3Enabled,
		BaseDataset:  getEnv("ZFS_BASE_DATASET", "backup"),
		ConfigFile:   getEnv("CONFIG_FILE", "clients.json"),
		MetadataFile: getEnv("METADATA_FILE", "metadata.json"),
		Port:         getEnv("PORT", "8080"),
	}
}
|
||||||
|
|
||||||
|
// loadEnvFile loads key=value pairs from a .env file
|
||||||
|
func loadEnvFile(filename string) {
|
||||||
|
file, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return // .env file is optional
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
|
||||||
|
// Skip empty lines and comments
|
||||||
|
if line == "" || strings.HasPrefix(line, "#") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse key=value
|
||||||
|
parts := strings.SplitN(line, "=", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
key := strings.TrimSpace(parts[0])
|
||||||
|
value := strings.TrimSpace(parts[1])
|
||||||
|
|
||||||
|
// Remove quotes if present
|
||||||
|
if len(value) >= 2 && (value[0] == '"' || value[0] == '\'') {
|
||||||
|
value = value[1 : len(value)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only set if not already defined in environment
|
||||||
|
if os.Getenv(key) == "" {
|
||||||
|
os.Setenv(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getEnv gets an environment variable with a default value
|
||||||
|
func getEnv(key, defaultValue string) string {
|
||||||
|
if value := os.Getenv(key); value != "" {
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
583
internal/server/server.go
Normal file
583
internal/server/server.go
Normal file
@@ -0,0 +1,583 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
	"context"
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"sort"
	"strings"
	"sync"
	"time"
)
|
||||||
|
|
||||||
|
// Server manages snapshots from multiple clients with S3 support.
// All access to clients and snapshots must go through mu.
type Server struct {
	clients      map[string]*ClientConfig       // client_id -> per-client config and quota
	snapshots    map[string][]*SnapshotMetadata // client_id -> recorded snapshots
	mu           sync.RWMutex                   // guards clients and snapshots
	s3Backend    *S3Backend                     // used when a client's StorageType is "s3"
	localBackend *LocalBackend                  // used for all other storage types
	metadataFile string                         // JSON file persisting the snapshots map
	configFile   string                         // JSON file persisting client configs
}
|
||||||
|
|
||||||
|
// New creates a new snapshot server, loading persisted client configuration
// and snapshot metadata from the given files before returning.
func New(configFile, metadataFile string, s3Backend *S3Backend, localBackend *LocalBackend) *Server {
	s := &Server{
		clients:      make(map[string]*ClientConfig),
		snapshots:    make(map[string][]*SnapshotMetadata),
		s3Backend:    s3Backend,
		localBackend: localBackend,
		metadataFile: metadataFile,
		configFile:   configFile,
	}

	// Both loads run before the server starts handling requests, so no
	// additional locking is needed here.
	s.loadConfig()
	s.loadMetadata()

	return s
}
|
||||||
|
|
||||||
|
// loadConfig reads client configurations from s.configFile into s.clients.
// If the file is missing or unreadable, a default "client1" entry (API key
// "secret123" stored hashed, 100 GiB quota, S3 storage) is created and
// persisted instead. Parse errors are logged and leave the map unchanged.
//
// Called from New before the server serves requests, so the map is written
// without holding s.mu.
func (s *Server) loadConfig() {
	data, err := os.ReadFile(s.configFile)
	if err != nil {
		log.Printf("Warning: Could not read config file: %v", err)
		// Create default config
		s.clients["client1"] = &ClientConfig{
			ClientID:     "client1",
			APIKey:       hashAPIKey("secret123"),
			MaxSizeBytes: 100 * 1024 * 1024 * 1024,
			Dataset:      "backup/client1",
			Enabled:      true,
			StorageType:  "s3",
		}
		s.saveConfig()
		return
	}

	var clients []*ClientConfig
	if err := json.Unmarshal(data, &clients); err != nil {
		log.Printf("Error parsing config: %v", err)
		return
	}

	for _, client := range clients {
		s.clients[client.ClientID] = client
	}

	log.Printf("Loaded %d client configurations", len(s.clients))
}
|
||||||
|
|
||||||
|
// saveConfig persists the current client configurations to s.configFile as
// indented JSON (mode 0600, since it contains API key hashes). Errors are
// logged, not returned. Takes a read lock, so it must not be called while
// s.mu is already held.
func (s *Server) saveConfig() {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var clients []*ClientConfig
	for _, client := range s.clients {
		clients = append(clients, client)
	}

	data, err := json.MarshalIndent(clients, "", "  ")
	if err != nil {
		log.Printf("Error marshaling config: %v", err)
		return
	}

	if err := os.WriteFile(s.configFile, data, 0600); err != nil {
		log.Printf("Error writing config: %v", err)
	}
}
|
||||||
|
|
||||||
|
// loadMetadata reads the persisted snapshot metadata map from s.metadataFile.
// A missing file just means a fresh start; parse errors are logged and leave
// the in-memory state empty.
//
// Called from New before the server serves requests, so s.snapshots is
// written without holding s.mu.
func (s *Server) loadMetadata() {
	data, err := os.ReadFile(s.metadataFile)
	if err != nil {
		log.Printf("No existing metadata file, starting fresh")
		return
	}

	if err := json.Unmarshal(data, &s.snapshots); err != nil {
		log.Printf("Error parsing metadata: %v", err)
		return
	}

	totalSnapshots := 0
	for _, snaps := range s.snapshots {
		totalSnapshots += len(snaps)
	}
	log.Printf("Loaded metadata for %d snapshots", totalSnapshots)
}
|
||||||
|
|
||||||
|
// saveMetadata persists the snapshot metadata map to s.metadataFile as
// indented JSON (mode 0600). Errors are logged, not returned. Takes a read
// lock, so it must not be called while s.mu is already held.
func (s *Server) saveMetadata() {
	s.mu.RLock()
	defer s.mu.RUnlock()

	data, err := json.MarshalIndent(s.snapshots, "", "  ")
	if err != nil {
		log.Printf("Error marshaling metadata: %v", err)
		return
	}

	if err := os.WriteFile(s.metadataFile, data, 0600); err != nil {
		log.Printf("Error writing metadata: %v", err)
	}
}
|
||||||
|
|
||||||
|
func (s *Server) authenticate(clientID, apiKey string) bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
client, exists := s.clients[clientID]
|
||||||
|
if !exists || !client.Enabled {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.APIKey == hashAPIKey(apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) getClientUsage(clientID string) int64 {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
var total int64
|
||||||
|
for _, snap := range s.snapshots[clientID] {
|
||||||
|
total += snap.SizeBytes
|
||||||
|
}
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) canAcceptSnapshot(clientID string, estimatedSize int64) (bool, string) {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
client, exists := s.clients[clientID]
|
||||||
|
if !exists {
|
||||||
|
return false, "Client not found"
|
||||||
|
}
|
||||||
|
|
||||||
|
currentUsage := s.getClientUsage(clientID)
|
||||||
|
|
||||||
|
if currentUsage+estimatedSize > client.MaxSizeBytes {
|
||||||
|
return false, fmt.Sprintf("Quota exceeded: using %d/%d bytes",
|
||||||
|
currentUsage, client.MaxSizeBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, "OK"
|
||||||
|
}
|
||||||
|
|
||||||
|
// rotateSnapshots deletes the client's oldest snapshots until the recorded
// usage fits within the client's quota, returning the number of snapshots
// deleted and the bytes reclaimed. At least one snapshot is always kept.
//
// Locking strategy: metadata is trimmed under s.mu first, then the backend
// deletes run WITHOUT the lock so slow storage I/O cannot stall other
// requests. Consequence: if a backend Delete fails, its metadata entry is
// already gone, leaving an orphaned object in storage (the failure is
// logged).
func (s *Server) rotateSnapshots(clientID string) (int, int64) {
	// First pass: collect snapshots to delete while holding lock
	s.mu.Lock()
	client, exists := s.clients[clientID]
	if !exists {
		s.mu.Unlock()
		return 0, 0
	}

	snapshots := s.snapshots[clientID]
	if len(snapshots) == 0 {
		s.mu.Unlock()
		return 0, 0
	}

	// Sort by timestamp (oldest first)
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.Before(snapshots[j].Timestamp)
	})

	currentUsage := int64(0)
	for _, snap := range snapshots {
		currentUsage += snap.SizeBytes
	}

	// Collect snapshots to delete (never the last remaining one)
	var toDelete []*SnapshotMetadata
	for currentUsage > client.MaxSizeBytes && len(snapshots) > 1 {
		oldest := snapshots[0]
		toDelete = append(toDelete, oldest)
		currentUsage -= oldest.SizeBytes
		snapshots = snapshots[1:]
	}

	// Update state before I/O
	s.snapshots[clientID] = snapshots
	s.mu.Unlock()

	if len(toDelete) == 0 {
		return 0, 0
	}

	// Select appropriate backend
	var backend StorageBackend
	if client.StorageType == "s3" {
		backend = s.s3Backend
	} else {
		backend = s.localBackend
	}

	// Second pass: delete without holding lock
	deletedCount := 0
	reclaimedBytes := int64(0)
	ctx := context.Background()

	for _, snap := range toDelete {
		if err := backend.Delete(ctx, snap.StorageKey); err != nil {
			log.Printf("Error deleting snapshot %s: %v", snap.StorageKey, err)
			continue
		}

		log.Printf("Rotated out snapshot: %s (freed %d bytes)", snap.StorageKey, snap.SizeBytes)
		reclaimedBytes += snap.SizeBytes
		deletedCount++
	}

	// Save metadata after deletions
	s.saveMetadata()

	return deletedCount, reclaimedBytes
}
|
||||||
|
|
||||||
|
// HTTP Handlers
|
||||||
|
|
||||||
|
// HandleUpload negotiates a snapshot upload. The client POSTs a JSON
// UploadRequest; after authentication and a quota check, the server tells
// the client how to send the data: "s3" (stream to the returned UploadURL
// using the returned StorageKey) for S3-backed clients, or "zfs-receive"
// with a generated snapshot name for locally stored clients. No snapshot
// data is transferred on this endpoint.
func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	var req UploadRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		respondJSON(w, http.StatusBadRequest, UploadResponse{
			Success: false,
			Message: "Invalid request",
		})
		return
	}

	if !s.authenticate(req.ClientID, req.APIKey) {
		respondJSON(w, http.StatusUnauthorized, UploadResponse{
			Success: false,
			Message: "Authentication failed",
		})
		return
	}

	// Check quota
	estimatedSize := req.EstimatedSize
	if estimatedSize == 0 {
		estimatedSize = 1 * 1024 * 1024 * 1024 // Default 1GB estimate
	}

	canAccept, msg := s.canAcceptSnapshot(req.ClientID, estimatedSize)
	if !canAccept {
		respondJSON(w, http.StatusForbidden, UploadResponse{
			Success: false,
			Message: msg,
		})
		return
	}

	// authenticate succeeded above, so the client entry exists here.
	s.mu.RLock()
	client := s.clients[req.ClientID]
	s.mu.RUnlock()

	timestamp := time.Now().Format("2006-01-02_15:04:05")

	if client.StorageType == "s3" {
		// S3 upload: hand the client a deterministic storage key and the
		// streaming endpoint to POST the data to.
		storageKey := fmt.Sprintf("%s/%s_%s.zfs", req.ClientID, req.DatasetName, timestamp)
		if req.Compressed {
			storageKey += ".gz"
		}

		respondJSON(w, http.StatusOK, UploadResponse{
			Success:      true,
			Message:      "Ready to receive snapshot",
			UploadMethod: "s3",
			StorageKey:   storageKey,
			UploadURL:    fmt.Sprintf("/upload-stream/%s", req.ClientID),
		})
	} else {
		// Local ZFS receive
		snapshotName := fmt.Sprintf("%s@%s_%s", client.Dataset, req.ClientID, timestamp)
		respondJSON(w, http.StatusOK, UploadResponse{
			Success:      true,
			Message:      "Ready to receive snapshot",
			UploadMethod: "zfs-receive",
			StorageKey:   snapshotName,
		})
	}
}
|
||||||
|
|
||||||
|
// HandleUploadStream receives the raw snapshot stream for an S3-backed
// client. The client ID comes from the URL path (/upload-stream/{clientID});
// the API key and snapshot attributes arrive in X-* request headers. The
// request body is streamed straight into the S3 backend, then the snapshot's
// metadata is recorded and persisted.
//
// NOTE(review): the storage key is taken verbatim from the X-Storage-Key
// header rather than re-derived server-side, and this handler always uses
// the S3 backend regardless of the client's configured StorageType —
// confirm both are intended.
func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Extract client ID from URL
	parts := strings.Split(r.URL.Path, "/")
	if len(parts) < 3 {
		http.Error(w, "Invalid URL", http.StatusBadRequest)
		return
	}
	clientID := parts[2]

	// Get metadata from headers
	apiKey := r.Header.Get("X-API-Key")
	storageKey := r.Header.Get("X-Storage-Key")
	datasetName := r.Header.Get("X-Dataset-Name")
	compressedStr := r.Header.Get("X-Compressed")
	incrementalStr := r.Header.Get("X-Incremental")
	baseSnapshot := r.Header.Get("X-Base-Snapshot")

	if !s.authenticate(clientID, apiKey) {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	ctx := context.Background()

	// Upload to S3. ContentLength is -1 for chunked requests; pass 0 so the
	// backend treats the size as unknown.
	size := r.ContentLength
	if size < 0 {
		size = 0
	}

	if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
		log.Printf("Error uploading to S3: %v", err)
		http.Error(w, "Upload failed", http.StatusInternalServerError)
		return
	}

	// Get actual size after upload; fall back to the declared size on error.
	actualSize, err := s.s3Backend.GetSize(ctx, storageKey)
	if err != nil {
		log.Printf("Error getting object size: %v", err)
		actualSize = size
	}

	// Save metadata
	s.mu.Lock()
	metadata := &SnapshotMetadata{
		ClientID:     clientID,
		SnapshotID:   storageKey,
		Timestamp:    time.Now(),
		SizeBytes:    actualSize,
		DatasetName:  datasetName,
		StorageKey:   storageKey,
		StorageType:  "s3",
		Compressed:   compressedStr == "true",
		Incremental:  incrementalStr == "true",
		BaseSnapshot: baseSnapshot,
	}
	s.snapshots[clientID] = append(s.snapshots[clientID], metadata)
	s.mu.Unlock()

	// saveMetadata takes its own read lock, so it runs after the unlock above.
	s.saveMetadata()

	respondJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"message": "Snapshot uploaded successfully",
		"size":    actualSize,
	})
}
|
||||||
|
|
||||||
|
// HandleStatus handles status requests
|
||||||
|
func (s *Server) HandleStatus(w http.ResponseWriter, r *http.Request) {
|
||||||
|
clientID := r.URL.Query().Get("client_id")
|
||||||
|
apiKey := r.URL.Query().Get("api_key")
|
||||||
|
|
||||||
|
if !s.authenticate(clientID, apiKey) {
|
||||||
|
respondJSON(w, http.StatusUnauthorized, StatusResponse{Success: false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mu.RLock()
|
||||||
|
client := s.clients[clientID]
|
||||||
|
snapshots := s.snapshots[clientID]
|
||||||
|
s.mu.RUnlock()
|
||||||
|
|
||||||
|
usedBytes := s.getClientUsage(clientID)
|
||||||
|
percentUsed := float64(usedBytes) / float64(client.MaxSizeBytes) * 100
|
||||||
|
|
||||||
|
respondJSON(w, http.StatusOK, StatusResponse{
|
||||||
|
Success: true,
|
||||||
|
TotalSnapshots: len(snapshots),
|
||||||
|
UsedBytes: usedBytes,
|
||||||
|
MaxBytes: client.MaxSizeBytes,
|
||||||
|
PercentUsed: percentUsed,
|
||||||
|
Snapshots: snapshots,
|
||||||
|
StorageType: client.StorageType,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRotate triggers quota-based snapshot rotation for the requesting
// client. Expects a POST with JSON body {"client_id": ..., "api_key": ...}
// and responds with the number of snapshots deleted and the bytes reclaimed.
func (s *Server) HandleRotate(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	var req struct {
		ClientID string `json:"client_id"`
		APIKey   string `json:"api_key"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request", http.StatusBadRequest)
		return
	}

	if !s.authenticate(req.ClientID, req.APIKey) {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	deletedCount, reclaimedBytes := s.rotateSnapshots(req.ClientID)

	respondJSON(w, http.StatusOK, map[string]interface{}{
		"success":         true,
		"deleted_count":   deletedCount,
		"reclaimed_bytes": reclaimedBytes,
	})
}
|
||||||
|
|
||||||
|
// HandleDownload streams a stored snapshot back to the requesting client.
// Expects client_id, api_key and snapshot_id query parameters. The object is
// fetched from the client's configured backend (S3 or local) and sent as an
// application/octet-stream attachment.
func (s *Server) HandleDownload(w http.ResponseWriter, r *http.Request) {
	clientID := r.URL.Query().Get("client_id")
	apiKey := r.URL.Query().Get("api_key")
	snapshotID := r.URL.Query().Get("snapshot_id")

	if !s.authenticate(clientID, apiKey) {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	// Find snapshot metadata (linear scan over this client's snapshots)
	s.mu.RLock()
	client := s.clients[clientID]
	var targetSnapshot *SnapshotMetadata
	for _, snap := range s.snapshots[clientID] {
		if snap.SnapshotID == snapshotID {
			targetSnapshot = snap
			break
		}
	}
	s.mu.RUnlock()

	if targetSnapshot == nil {
		http.Error(w, "Snapshot not found", http.StatusNotFound)
		return
	}

	ctx := context.Background()
	var backend StorageBackend

	if client.StorageType == "s3" {
		backend = s.s3Backend
	} else {
		backend = s.localBackend
	}

	// Download from storage
	reader, err := backend.Download(ctx, targetSnapshot.StorageKey)
	if err != nil {
		log.Printf("Error downloading snapshot: %v", err)
		http.Error(w, "Download failed", http.StatusInternalServerError)
		return
	}
	defer reader.Close()

	// Stream to client
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", targetSnapshot.StorageKey))

	// Headers are committed once copying starts, so a mid-stream failure
	// can only be logged, not reported to the client.
	if _, err := io.Copy(w, reader); err != nil {
		log.Printf("Error streaming snapshot: %v", err)
	}
}
|
||||||
|
|
||||||
|
// HandleHealth handles health check requests
|
||||||
|
func (s *Server) HandleHealth(w http.ResponseWriter, r *http.Request) {
|
||||||
|
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||||
|
"status": "healthy",
|
||||||
|
"time": time.Now(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleRotationPolicy handles rotation policy requests from clients.
// Returns the rotation policy configured for the client, if any.
// If a policy is set, the client must use it and cannot override it;
// otherwise ServerManaged is false and the client may apply its own defaults.
// Authentication uses the client_id and api_key query parameters.
func (s *Server) HandleRotationPolicy(w http.ResponseWriter, r *http.Request) {
	clientID := r.URL.Query().Get("client_id")
	apiKey := r.URL.Query().Get("api_key")

	if !s.authenticate(clientID, apiKey) {
		respondJSON(w, http.StatusUnauthorized, RotationPolicyResponse{
			Success: false,
			Message: "Authentication failed",
		})
		return
	}

	s.mu.RLock()
	client, exists := s.clients[clientID]
	s.mu.RUnlock()

	if !exists {
		respondJSON(w, http.StatusNotFound, RotationPolicyResponse{
			Success: false,
			Message: "Client not found",
		})
		return
	}

	// Check if server-managed rotation policy is configured
	if client.RotationPolicy != nil {
		respondJSON(w, http.StatusOK, RotationPolicyResponse{
			Success:        true,
			Message:        "Server-managed rotation policy",
			RotationPolicy: client.RotationPolicy,
			ServerManaged:  true,
		})
		return
	}

	// No server-managed policy - client can use its own defaults
	respondJSON(w, http.StatusOK, RotationPolicyResponse{
		Success:        true,
		Message:        "No server-managed policy, client can use defaults",
		RotationPolicy: nil,
		ServerManaged:  false,
	})
}
|
||||||
|
|
||||||
|
// RegisterRoutes registers all HTTP routes
|
||||||
|
func (s *Server) RegisterRoutes(mux *http.ServeMux) {
|
||||||
|
mux.HandleFunc("/upload", s.HandleUpload)
|
||||||
|
mux.HandleFunc("/upload-stream/", s.HandleUploadStream)
|
||||||
|
mux.HandleFunc("/status", s.HandleStatus)
|
||||||
|
mux.HandleFunc("/rotate", s.HandleRotate)
|
||||||
|
mux.HandleFunc("/download", s.HandleDownload)
|
||||||
|
mux.HandleFunc("/health", s.HandleHealth)
|
||||||
|
mux.HandleFunc("/rotation-policy", s.HandleRotationPolicy)
|
||||||
|
}
|
||||||
|
|
||||||
|
func respondJSON(w http.ResponseWriter, status int, data interface{}) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(status)
|
||||||
|
json.NewEncoder(w).Encode(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// hashAPIKey returns the hex-encoded SHA-256 digest of key; client API keys
// are stored and compared in this hashed form.
func hashAPIKey(key string) string {
	digest := sha256.Sum256([]byte(key))
	return hex.EncodeToString(digest[:])
}
|
||||||
197
internal/server/storage.go
Normal file
197
internal/server/storage.go
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
"github.com/mistifyio/go-zfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StorageBackend defines the interface for different storage types
// (S3-compatible object storage or local ZFS).
type StorageBackend interface {
	// Upload stores data under key; size is the expected byte count
	// (0 when unknown).
	Upload(ctx context.Context, key string, data io.Reader, size int64) error
	// Download returns a reader for the object at key; the caller must
	// Close it.
	Download(ctx context.Context, key string) (io.ReadCloser, error)
	// Delete removes the object stored under key.
	Delete(ctx context.Context, key string) error
	// List returns the keys stored under the given prefix.
	List(ctx context.Context, prefix string) ([]string, error)
	// GetSize returns the stored size in bytes of the object at key.
	GetSize(ctx context.Context, key string) (int64, error)
}
|
||||||
|
|
||||||
|
// S3Backend implements StorageBackend for S3-compatible storage
// (AWS S3, MinIO, etc.) via the minio-go client.
type S3Backend struct {
	client     *minio.Client // configured S3 client
	bucketName string        // bucket holding all stored objects
}
|
||||||
|
|
||||||
|
// NewS3Backend creates a new S3 storage backend
|
||||||
|
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
|
||||||
|
client, err := minio.New(endpoint, &minio.Options{
|
||||||
|
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
|
||||||
|
Secure: useSSL,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create S3 client: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure bucket exists
|
||||||
|
ctx := context.Background()
|
||||||
|
exists, err := client.BucketExists(ctx, bucketName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to check bucket: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create bucket: %v", err)
|
||||||
|
}
|
||||||
|
log.Printf("Created S3 bucket: %s", bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &S3Backend{
|
||||||
|
client: client,
|
||||||
|
bucketName: bucketName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload uploads data to S3
|
||||||
|
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
|
||||||
|
_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
|
||||||
|
minio.PutObjectOptions{
|
||||||
|
ContentType: "application/octet-stream",
|
||||||
|
PartSize: 10 * 1024 * 1024, // 10MB parts
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download retrieves data from S3
|
||||||
|
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
|
||||||
|
obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an object from S3. Default removal options are used
// (no version ID or governance-bypass flags).
func (s *S3Backend) Delete(ctx context.Context, key string) error {
	return s.client.RemoveObject(ctx, s.bucketName, key, minio.RemoveObjectOptions{})
}
|
||||||
|
|
||||||
|
// List returns all objects with the given prefix
|
||||||
|
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
|
||||||
|
var keys []string
|
||||||
|
|
||||||
|
objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{
|
||||||
|
Prefix: prefix,
|
||||||
|
Recursive: true,
|
||||||
|
})
|
||||||
|
|
||||||
|
for object := range objectCh {
|
||||||
|
if object.Err != nil {
|
||||||
|
return nil, object.Err
|
||||||
|
}
|
||||||
|
keys = append(keys, object.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSize returns the size of an object in S3
|
||||||
|
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
|
||||||
|
info, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return info.Size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LocalBackend implements StorageBackend for local ZFS storage. Keys are
// interpreted as ZFS dataset/snapshot names rather than object keys.
type LocalBackend struct {
	baseDataset string // base ZFS dataset for this server (set from configuration)
}
|
||||||
|
|
||||||
|
// NewLocalBackend creates a new local ZFS storage backend rooted at
// baseDataset.
func NewLocalBackend(baseDataset string) *LocalBackend {
	return &LocalBackend{baseDataset: baseDataset}
}
|
||||||
|
|
||||||
|
// Upload is not supported for local backend: snapshot data is ingested via
// a dedicated `zfs receive` endpoint instead of this generic byte-stream
// interface, so this always returns an error.
func (l *LocalBackend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
	return fmt.Errorf("local backend upload not supported via storage interface, use zfs receive endpoint")
}
|
||||||
|
|
||||||
|
// Download creates a zfs send stream
|
||||||
|
func (l *LocalBackend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
|
||||||
|
cmd := exec.CommandContext(ctx, "zfs", "send", key)
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cmdReadCloser{stdout: stdout, cmd: cmd}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete destroys a ZFS dataset
|
||||||
|
func (l *LocalBackend) Delete(ctx context.Context, key string) error {
|
||||||
|
ds, err := zfs.GetDataset(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return ds.Destroy(zfs.DestroyDefault)
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns all snapshots with the given prefix
|
||||||
|
func (l *LocalBackend) List(ctx context.Context, prefix string) ([]string, error) {
|
||||||
|
snapshots, err := zfs.Snapshots(prefix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var names []string
|
||||||
|
for _, snap := range snapshots {
|
||||||
|
names = append(names, snap.Name)
|
||||||
|
}
|
||||||
|
return names, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSize returns the used size of a ZFS dataset
|
||||||
|
func (l *LocalBackend) GetSize(ctx context.Context, key string) (int64, error) {
|
||||||
|
ds, err := zfs.GetDataset(key)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return int64(ds.Used), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmdReadCloser wraps stdout pipe to properly wait for command completion
|
||||||
|
type cmdReadCloser struct {
|
||||||
|
stdout io.ReadCloser
|
||||||
|
cmd *exec.Cmd
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cmdReadCloser) Read(p []byte) (int, error) {
|
||||||
|
return c.stdout.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cmdReadCloser) Close() error {
|
||||||
|
if c.closed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
c.closed = true
|
||||||
|
err := c.stdout.Close()
|
||||||
|
waitErr := c.cmd.Wait()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return waitErr
|
||||||
|
}
|
||||||
87
internal/server/types.go
Normal file
87
internal/server/types.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClientConfig holds client authentication and quota information,
// loaded from the server's clients.json configuration file.
type ClientConfig struct {
	ClientID     string `json:"client_id"`      // unique client identifier
	APIKey       string `json:"api_key"`        // hex SHA-256 hash of the client's API key (see clients.json)
	MaxSizeBytes int64  `json:"max_size_bytes"` // per-client storage quota in bytes
	Dataset      string `json:"dataset"`        // server-side dataset/prefix reserved for this client
	Enabled      bool   `json:"enabled"`        // presumably disabled clients are rejected — confirm in handlers
	StorageType  string `json:"storage_type"`   // "s3" or "local"
	// RotationPolicy defines the snapshot retention policy for this client.
	// If set, the client must use this policy and cannot override it.
	RotationPolicy *RotationPolicy `json:"rotation_policy,omitempty"`
}
|
||||||
|
|
||||||
|
// RotationPolicy defines retention settings for automatic snapshots.
// When set on the server, clients must use this policy for local rotation.
type RotationPolicy struct {
	// KeepHourly is the number of hourly snapshots to keep.
	KeepHourly int `json:"keep_hourly"`
	// KeepDaily is the number of daily snapshots to keep.
	KeepDaily int `json:"keep_daily"`
	// KeepWeekly is the number of weekly snapshots to keep.
	KeepWeekly int `json:"keep_weekly"`
	// KeepMonthly is the number of monthly snapshots to keep.
	KeepMonthly int `json:"keep_monthly"`
}
|
||||||
|
|
||||||
|
// SnapshotMetadata represents snapshot information as tracked by the server
// (persisted in the metadata file).
type SnapshotMetadata struct {
	ClientID     string    `json:"client_id"`     // owning client
	SnapshotID   string    `json:"snapshot_id"`   // unique snapshot identifier
	Timestamp    time.Time `json:"timestamp"`     // when the snapshot was taken
	SizeBytes    int64     `json:"size_bytes"`    // stored size in bytes
	DatasetName  string    `json:"dataset_name"`  // source ZFS dataset on the client
	StorageKey   string    `json:"storage_key"`   // key/name within the storage backend
	StorageType  string    `json:"storage_type"`  // "s3" or "local"
	Compressed   bool      `json:"compressed"`    // whether the stored stream is compressed
	Incremental  bool      `json:"incremental"`   // incremental (vs. full) send
	BaseSnapshot string    `json:"base_snapshot,omitempty"` // base of an incremental send, if any
}
|
||||||
|
|
||||||
|
// UploadRequest represents a snapshot upload request sent by a client
// before streaming snapshot data.
type UploadRequest struct {
	ClientID      string `json:"client_id"`      // requesting client
	APIKey        string `json:"api_key"`        // client's API key for authentication
	DatasetName   string `json:"dataset_name"`   // source ZFS dataset being backed up
	Timestamp     string `json:"timestamp"`      // snapshot timestamp (string form; format set by the client)
	Compressed    bool   `json:"compressed"`     // whether the stream will be compressed
	EstimatedSize int64  `json:"estimated_size"` // expected size in bytes, for quota checks
	Incremental   bool   `json:"incremental"`    // incremental (vs. full) send
	BaseSnapshot  string `json:"base_snapshot,omitempty"` // base of the incremental send, if any
}
|
||||||
|
|
||||||
|
// UploadResponse represents the response to an upload request, telling the
// client whether — and how — to stream the snapshot.
type UploadResponse struct {
	Success      bool   `json:"success"`
	Message      string `json:"message"`
	UploadURL    string `json:"upload_url,omitempty"`    // where to stream the snapshot data
	UploadMethod string `json:"upload_method,omitempty"` // "s3" or "zfs-receive"
	StorageKey   string `json:"storage_key,omitempty"`   // backend key assigned to this upload
}
|
||||||
|
|
||||||
|
// StatusResponse represents the response to a status request: quota usage
// plus the list of stored snapshots for the client.
type StatusResponse struct {
	Success        bool                `json:"success"`
	TotalSnapshots int                 `json:"total_snapshots"` // number of stored snapshots
	UsedBytes      int64               `json:"used_bytes"`      // storage currently consumed
	MaxBytes       int64               `json:"max_bytes"`       // quota (ClientConfig.MaxSizeBytes)
	PercentUsed    float64             `json:"percent_used"`    // usage as a percentage (computed server-side)
	Snapshots      []*SnapshotMetadata `json:"snapshots"`       // per-snapshot details
	StorageType    string              `json:"storage_type"`    // "s3" or "local"
}
|
||||||
|
|
||||||
|
// RotationPolicyResponse represents the response to a rotation policy
// request (GET /rotation-policy).
type RotationPolicyResponse struct {
	Success bool   `json:"success"`
	Message string `json:"message,omitempty"`
	// RotationPolicy is the policy to apply; nil when the server has none
	// configured for this client.
	RotationPolicy *RotationPolicy `json:"rotation_policy,omitempty"`
	// ServerManaged indicates if the policy is managed by the server.
	// If true, client must use this policy and cannot override it.
	ServerManaged bool `json:"server_managed"`
}
|
||||||
431
readme.md
Normal file
431
readme.md
Normal file
@@ -0,0 +1,431 @@
|
|||||||
|
# ZFS Snapshot Manager
|
||||||
|
|
||||||
|
A distributed ZFS snapshot management system with S3-compatible storage support. This project provides client, server, and restore tools for managing ZFS snapshots across multiple machines.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- **S3 Storage Support**: Store snapshots in any S3-compatible storage (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
|
||||||
|
- **Local ZFS Storage**: Option to use local ZFS datasets for maximum performance
|
||||||
|
- **Multi-client Architecture**: Support for multiple clients with isolated storage and per-client quotas
|
||||||
|
- **Automatic Compression**: Gzip compression for reduced storage costs
|
||||||
|
- **Snapshot Rotation**: Automatic cleanup of old snapshots based on quota
|
||||||
|
- **Server-Managed Rotation Policies**: Centralized control of client rotation policies - clients must use server-configured retention settings
|
||||||
|
- **API Key Authentication**: Secure client-server communication
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
zfs/
|
||||||
|
├── cmd/
|
||||||
|
│ ├── zfs-server/ # Server executable
|
||||||
|
│ ├── zfs-client/ # Client executable
|
||||||
|
│ └── zfs-restore/ # Restore tool executable
|
||||||
|
├── internal/
|
||||||
|
│ ├── server/ # Server package (config, storage, HTTP handlers)
|
||||||
|
│ ├── client/ # Client package (snapshot creation, upload)
|
||||||
|
│ └── restore/ # Restore package (download, restore operations)
|
||||||
|
├── go.mod
|
||||||
|
├── go.sum
|
||||||
|
├── .env # Configuration file
|
||||||
|
└── readme.md
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### Using Go Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install server
|
||||||
|
go install git.ma-al.com/goc_marek/zfs/cmd/zfs-server@latest
|
||||||
|
|
||||||
|
# Install client
|
||||||
|
go install git.ma-al.com/goc_marek/zfs/cmd/zfs-client@latest
|
||||||
|
|
||||||
|
# Install restore tool
|
||||||
|
go install git.ma-al.com/goc_marek/zfs/cmd/zfs-restore@latest
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build from Source
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repository
|
||||||
|
git clone https://git.ma-al.com/goc_marek/zfs.git
|
||||||
|
cd zfs
|
||||||
|
|
||||||
|
# Build all binaries
|
||||||
|
go build -o bin/zfs-server ./cmd/zfs-server
|
||||||
|
go build -o bin/zfs-client ./cmd/zfs-client
|
||||||
|
go build -o bin/zfs-restore ./cmd/zfs-restore
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Server Configuration
|
||||||
|
|
||||||
|
Create a `.env` file in the working directory:
|
||||||
|
|
||||||
|
```env
|
||||||
|
# S3 Configuration
|
||||||
|
S3_ENABLED=true
|
||||||
|
S3_ENDPOINT=s3.amazonaws.com
|
||||||
|
S3_ACCESS_KEY=YOUR_ACCESS_KEY
|
||||||
|
S3_SECRET_KEY=YOUR_SECRET_KEY
|
||||||
|
S3_BUCKET=zfs-snapshots
|
||||||
|
S3_USE_SSL=true
|
||||||
|
|
||||||
|
# Local ZFS fallback
|
||||||
|
ZFS_BASE_DATASET=backup
|
||||||
|
|
||||||
|
# Server settings
|
||||||
|
CONFIG_FILE=clients.json
|
||||||
|
METADATA_FILE=metadata.json
|
||||||
|
PORT=8080
|
||||||
|
```
|
||||||
|
|
||||||
|
### Client Configuration
|
||||||
|
|
||||||
|
```env
|
||||||
|
CLIENT_ID=client1
|
||||||
|
API_KEY=secret123
|
||||||
|
SERVER_URL=http://backup-server:8080
|
||||||
|
LOCAL_DATASET=tank/data
|
||||||
|
COMPRESS=true
|
||||||
|
STORAGE_TYPE=s3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Restore Tool Configuration
|
||||||
|
|
||||||
|
```env
|
||||||
|
CLIENT_ID=client1
|
||||||
|
API_KEY=secret123
|
||||||
|
SERVER_URL=http://backup-server:8080
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start the backup server
|
||||||
|
zfs-server
|
||||||
|
|
||||||
|
# The server listens on port 8080 by default
|
||||||
|
# Endpoints:
|
||||||
|
# POST /upload - Request upload authorization
|
||||||
|
# POST /upload-stream/ - Stream snapshot data
|
||||||
|
# GET /status - Check client status
|
||||||
|
# POST /rotate - Rotate old snapshots
|
||||||
|
# GET /download - Download a snapshot
|
||||||
|
# GET /rotation-policy - Get client rotation policy
|
||||||
|
# GET /health - Health check
|
||||||
|
```
|
||||||
|
|
||||||
|
### Client Commands
|
||||||
|
|
||||||
|
The `zfs-client` tool provides the following commands for managing ZFS snapshots:
|
||||||
|
|
||||||
|
#### `backup`
|
||||||
|
Creates a snapshot and sends it to the server. Automatically uses incremental backup if a bookmark exists.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client backup
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `backup-full`
|
||||||
|
Forces a full backup (no incremental). Use for the initial backup or when you want to resend the complete dataset.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client backup-full
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `backup-incremental`
|
||||||
|
Creates an incremental backup from the last bookmark. Requires an existing bookmark from a previous full backup.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client backup-incremental
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `snapshot <type>`
|
||||||
|
Creates a typed snapshot (hourly, daily, weekly, monthly) with automatic rotation. The rotation policy is fetched from the server if configured.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client snapshot hourly
|
||||||
|
zfs-client snapshot daily
|
||||||
|
zfs-client snapshot weekly
|
||||||
|
zfs-client snapshot monthly
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `rotate`
|
||||||
|
Rotates local snapshots based on the retention policy. If the server has a rotation policy configured, it will be used; otherwise, default values apply.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client rotate
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `rotate-remote`
|
||||||
|
Requests the server to rotate (delete old) remote snapshots to free up storage quota.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client rotate-remote
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `status`
|
||||||
|
Displays the current backup status including storage usage, quota, and snapshot count from the server.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client status
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `bookmarks`
|
||||||
|
Lists ZFS bookmarks on the local system. Bookmarks are used as reference points for incremental backups.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client bookmarks
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `help`
|
||||||
|
Shows the help message with all available commands and options.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-client help
|
||||||
|
```
|
||||||
|
|
||||||
|
### Restore Tool Commands
|
||||||
|
|
||||||
|
The `zfs-restore` tool provides commands for listing and restoring snapshots from the backup server:
|
||||||
|
|
||||||
|
#### `list`
|
||||||
|
Lists all available snapshots for the configured client from the server.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore list
|
||||||
|
```
|
||||||
|
|
||||||
|
Output example:
|
||||||
|
```
|
||||||
|
# Snapshot ID Timestamp Size
|
||||||
|
1 client1/tank_data_2024-02-13 2024-02-13 14:30 1.2 GB
|
||||||
|
2 client1/tank_data_2024-02-12 2024-02-12 14:30 1.1 GB
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `restore <number> <dataset>`
|
||||||
|
Restores a snapshot by its list number to a specified ZFS dataset.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore restore 1 tank/restored
|
||||||
|
```
|
||||||
|
|
||||||
|
Options:
|
||||||
|
- `--force` or `-f` - Overwrite existing dataset if it exists
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore restore 1 tank/restored --force
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `latest <dataset>`
|
||||||
|
Restores the most recent snapshot to a specified dataset.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore latest tank/restored
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `save <number> <filename>`
|
||||||
|
Downloads a snapshot and saves it to a local file without restoring.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore save 1 backup.zfs.gz
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `mount <dataset> <mountpoint>`
|
||||||
|
Mounts a restored ZFS dataset to a specified directory for file access.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
zfs-restore mount tank/restored /mnt/restore
|
||||||
|
```
|
||||||
|
|
||||||
|
## S3 Provider Configuration
|
||||||
|
|
||||||
|
### AWS S3
|
||||||
|
|
||||||
|
```env
|
||||||
|
S3_ENDPOINT=s3.amazonaws.com
|
||||||
|
S3_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE
|
||||||
|
S3_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||||
|
S3_BUCKET=my-zfs-backups
|
||||||
|
S3_USE_SSL=true
|
||||||
|
```
|
||||||
|
|
||||||
|
### MinIO (Self-Hosted)
|
||||||
|
|
||||||
|
```env
|
||||||
|
S3_ENDPOINT=minio.example.com:9000
|
||||||
|
S3_ACCESS_KEY=minioadmin
|
||||||
|
S3_SECRET_KEY=minioadmin
|
||||||
|
S3_BUCKET=zfs-snapshots
|
||||||
|
S3_USE_SSL=false
|
||||||
|
```
|
||||||
|
|
||||||
|
### Backblaze B2
|
||||||
|
|
||||||
|
```env
|
||||||
|
S3_ENDPOINT=s3.us-west-000.backblazeb2.com
|
||||||
|
S3_ACCESS_KEY=your_key_id
|
||||||
|
S3_SECRET_KEY=your_application_key
|
||||||
|
S3_BUCKET=zfs-backups
|
||||||
|
S3_USE_SSL=true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Wasabi
|
||||||
|
|
||||||
|
```env
|
||||||
|
S3_ENDPOINT=s3.wasabisys.com
|
||||||
|
S3_ACCESS_KEY=your_access_key
|
||||||
|
S3_SECRET_KEY=your_secret_key
|
||||||
|
S3_BUCKET=zfs-backups
|
||||||
|
S3_USE_SSL=true
|
||||||
|
```
|
||||||
|
|
||||||
|
## Client Configuration File
|
||||||
|
|
||||||
|
The server maintains a `clients.json` file with client configurations:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"client_id": "client1",
|
||||||
|
"api_key": "hashed_key",
|
||||||
|
"max_size_bytes": 107374182400,
|
||||||
|
"dataset": "backup/client1",
|
||||||
|
"enabled": true,
|
||||||
|
"storage_type": "s3",
|
||||||
|
"rotation_policy": {
|
||||||
|
"keep_hourly": 24,
|
||||||
|
"keep_daily": 7,
|
||||||
|
"keep_weekly": 4,
|
||||||
|
"keep_monthly": 12
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Server-Managed Rotation Policy
|
||||||
|
|
||||||
|
When `rotation_policy` is configured for a client in `clients.json`, the client **must** use this policy and cannot override it. This enables centralized control of snapshot retention policies:
|
||||||
|
|
||||||
|
- **Server-Managed**: If `rotation_policy` is set, the client fetches the policy from the server and applies it
|
||||||
|
- **Client-Autonomous**: If no `rotation_policy` is set, the client uses its default policy
|
||||||
|
|
||||||
|
The rotation policy fields are:
|
||||||
|
- `keep_hourly`: Number of hourly snapshots to keep (default: 24)
|
||||||
|
- `keep_daily`: Number of daily snapshots to keep (default: 7)
|
||||||
|
- `keep_weekly`: Number of weekly snapshots to keep (default: 4)
|
||||||
|
- `keep_monthly`: Number of monthly snapshots to keep (default: 12)
|
||||||
|
|
||||||
|
#### API Endpoint
|
||||||
|
|
||||||
|
The server exposes a `/rotation-policy` endpoint for clients to fetch their configured policy:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
GET /rotation-policy?client_id=client1&api_key=secret123
|
||||||
|
```
|
||||||
|
|
||||||
|
Response:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "Server-managed rotation policy",
|
||||||
|
"rotation_policy": {
|
||||||
|
"keep_hourly": 24,
|
||||||
|
"keep_daily": 7,
|
||||||
|
"keep_weekly": 4,
|
||||||
|
"keep_monthly": 12
|
||||||
|
},
|
||||||
|
"server_managed": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────┐ ZFS send ┌──────────────────┐
|
||||||
|
│ Client 1 │───────┬─────────▶│ Backup Server │
|
||||||
|
│ (S3 mode) │ │ │ │
|
||||||
|
└─────────────┘ │ │ ┌────────────┐ │
|
||||||
|
│ │ │ S3 Backend │ │
|
||||||
|
┌─────────────┐ │ HTTP │ └─────┬──────┘ │
|
||||||
|
│ Client 2 │───────┤ Stream │ │ │
|
||||||
|
│ (S3 mode) │ │ │ ▼ │
|
||||||
|
└─────────────┘ │ │ ┌────────────┐ │
|
||||||
|
│ │ │ MinIO │ │
|
||||||
|
┌─────────────┐ │ │ │ or │ │
|
||||||
|
│ Client 3 │───────┘ │ │ AWS S3 │ │
|
||||||
|
│ (Local ZFS) │─────────────────▶│ └────────────┘ │
|
||||||
|
└─────────────┘ ZFS recv │ │
|
||||||
|
│ ┌────────────┐ │
|
||||||
|
│ │ Local ZFS │ │
|
||||||
|
│ │ Backend │ │
|
||||||
|
│ └────────────┘ │
|
||||||
|
└──────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Storage Format
|
||||||
|
|
||||||
|
Snapshots are stored in S3 with the following naming convention:
|
||||||
|
|
||||||
|
```
|
||||||
|
s3://bucket/client1/tank_data_2024-02-13_14:30:00.zfs.gz
|
||||||
|
^ ^ ^
|
||||||
|
client dataset timestamp
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security
|
||||||
|
|
||||||
|
- API keys are hashed using SHA-256
|
||||||
|
- S3 bucket policies can restrict access to backup server only
|
||||||
|
- Server-side encryption available in S3
|
||||||
|
- Client-side encryption is possible by inserting an encryption step (e.g. `gpg` or `age`) into the compression pipeline before upload
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
### Health Check
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl http://localhost:8080/health
|
||||||
|
```
|
||||||
|
|
||||||
|
### Server Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SystemD
|
||||||
|
journalctl -u zfs-server -f
|
||||||
|
|
||||||
|
# Docker
|
||||||
|
docker logs -f zfs-server
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
### Project Layout
|
||||||
|
|
||||||
|
- `cmd/` - Main applications (entry points)
|
||||||
|
- `internal/` - Private application code
|
||||||
|
- `server/` - Server logic, HTTP handlers, storage backends
|
||||||
|
- `client/` - Client logic for creating and uploading snapshots
|
||||||
|
- `restore/` - Restore logic for downloading and restoring snapshots
|
||||||
|
|
||||||
|
### Building
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build all
|
||||||
|
go build ./...
|
||||||
|
|
||||||
|
# Run tests
|
||||||
|
go test ./...
|
||||||
|
|
||||||
|
# Lint
|
||||||
|
go vet ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT License
|
||||||
Reference in New Issue
Block a user