restore
@@ -98,7 +98,35 @@ func main() {
 			os.Exit(1)
 		}
 
-		if err := client.RestoreSnapshot(snapshot, targetDataset, force); err != nil {
+		if err := client.RestoreSnapshot(snapshot, targetDataset, force, snapshots); err != nil {
+			fmt.Printf("Error: %v\n", err)
+			os.Exit(1)
+		}
+
+	case "mount":
+		// Mount a restored dataset to access files
+		if len(os.Args) < 3 {
+			fmt.Println("Usage: zfs-restore mount <dataset> [mountpoint]")
+			fmt.Println("\nExamples:")
+			fmt.Println(" zfs-restore mount tank/restored /mnt/recover")
+			fmt.Println(" zfs-restore mount tank/restored # interactive")
+			os.Exit(1)
+		}
+
+		dataset := os.Args[2]
+		mountpoint := ""
+
+		if len(os.Args) > 3 {
+			mountpoint = os.Args[3]
+		} else {
+			fmt.Printf("Mountpoint [/mnt/recover]: ")
+			fmt.Scanln(&mountpoint)
+			if mountpoint == "" {
+				mountpoint = "/mnt/recover"
+			}
+		}
+
+		if err := client.MountDataset(dataset, mountpoint); err != nil {
 			fmt.Printf("Error: %v\n", err)
 			os.Exit(1)
 		}
@@ -119,11 +147,13 @@ func printUsage() {
 	fmt.Println("\nCommands:")
 	fmt.Println(" list - List available snapshots")
 	fmt.Println(" restore <#|latest> <dataset> [--force] - Restore snapshot to ZFS")
+	fmt.Println(" mount <dataset> [mountpoint] - Mount dataset to recover files")
 	fmt.Println(" help - Show this help message")
 	fmt.Println("\nQuick Examples:")
 	fmt.Println(" zfs-restore list - See available backups")
 	fmt.Println(" zfs-restore restore latest tank/data - Restore most recent backup")
 	fmt.Println(" zfs-restore restore 1 tank/restored - Restore snapshot #1")
+	fmt.Println(" zfs-restore mount tank/restored /mnt - Mount to recover files")
 	fmt.Println("\nEnvironment Variables (can be set in .env file):")
 	fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
 	fmt.Println(" API_KEY - API key for authentication (default: secret123)")
@@ -9,12 +9,15 @@ import (
 	"io"
 	"net/http"
 	"os/exec"
+	"strings"
 	"time"
 
 	"github.com/mistifyio/go-zfs"
 	"github.com/pierrec/lz4/v4"
 )
 
+var uploadUrl = "/upload-stream/"
+
 // SnapshotResult contains the result of a snapshot creation and send operation.
 type SnapshotResult struct {
 	FullBackup bool
@@ -121,7 +124,14 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
 	}
 
 	reqBody, _ := json.Marshal(uploadReq)
-	resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
+	uploadURL := c.config.ServerURL
+	// Ensure proper URL format
+	if !strings.HasSuffix(uploadURL, "/") {
+		uploadURL += "/"
+	}
+	uploadURL += "upload"
+
+	resp, err := http.Post(uploadURL, "application/json", bytes.NewBuffer(reqBody))
 	if err != nil {
 		return fmt.Errorf("failed to request upload: %v", err)
 	}
@@ -134,7 +144,6 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
 		UploadMethod string `json:"upload_method"`
 		StorageKey string `json:"storage_key"`
 	}
-
 	if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
 		return fmt.Errorf("failed to decode response: %v", err)
 	}
@@ -193,7 +202,23 @@ func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageK
 	}
 
 	// Create HTTP request to server
-	req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
+	// Build full URL properly - check if uploadURL is already full URL
+	fullURL := uploadURL
+	// If uploadURL is a relative path, prepend server URL
+	if !strings.HasPrefix(uploadURL, "http://") && !strings.HasPrefix(uploadURL, "https://") {
+		fullURL = c.config.ServerURL
+		// Remove trailing slash from base URL if present
+		fullURL = strings.TrimRight(fullURL, "/")
+		// Add leading slash to upload URL if not present
+		if !strings.HasPrefix(uploadURL, "/") {
+			uploadURL = "/" + uploadURL
+		}
+		fullURL += uploadURL
+	}
+
+	fmt.Printf(" Streaming to: %s\n", fullURL)
+
+	req, err := http.NewRequest("POST", fullURL, reader)
 	if err != nil {
 		return fmt.Errorf("failed to create request: %v", err)
 	}
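
For reference, the normalization that the two hand-rolled URL-building blocks above perform can also be expressed with the standard library. This is a standalone sketch, not part of the commit; it assumes Go 1.19+ for url.JoinPath, and the example values are placeholders.

package main

import (
	"fmt"
	"net/url"
)

// joinUploadURL treats an absolute uploadURL as-is and otherwise appends it to the
// configured server URL with exactly one slash in between.
func joinUploadURL(serverURL, uploadURL string) (string, error) {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return "", err
	}
	if u.IsAbs() {
		return uploadURL, nil
	}
	return url.JoinPath(serverURL, uploadURL)
}

func main() {
	full, _ := joinUploadURL("http://localhost:8080/", "/upload-stream/client1")
	fmt.Println(full) // http://localhost:8080/upload-stream/client1
}
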
@@ -20,6 +20,7 @@ type Config struct {
 	LocalDataset string `json:"local_dataset"`
 	// Compress enables LZ4 compression for transfers
 	Compress bool `json:"compress"`
+	UploadURL string `json:upload_url`
 }
 
 // LoadConfig loads client configuration from environment variables and .env file.
@@ -34,6 +35,7 @@ func LoadConfig() *Config {
 		ServerURL: getEnv("SERVER_URL", "http://localhost:8080"),
 		LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
 		Compress: getEnv("COMPRESS", "true") == "true",
+		UploadURL: "/upload-stream/",
 	}
 }
 
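
A note on the UploadURL field added above: its struct tag is written as `json:upload_url`, but encoding/json only honors tags in the conventional key:"value" form, so an unquoted tag is silently ignored and the field marshals under its Go name. A standalone sketch (not part of the commit) illustrating the difference:

package main

import (
	"encoding/json"
	"fmt"
)

type quotedTag struct {
	UploadURL string `json:"upload_url"`
}

type unquotedTag struct {
	UploadURL string `json:upload_url` // malformed: the tag value must be quoted
}

func main() {
	a, _ := json.Marshal(quotedTag{UploadURL: "/upload-stream/"})
	b, _ := json.Marshal(unquotedTag{UploadURL: "/upload-stream/"})
	fmt.Println(string(a)) // {"upload_url":"/upload-stream/"}
	fmt.Println(string(b)) // {"UploadURL":"/upload-stream/"}: the malformed tag is ignored
}
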
@@ -119,7 +119,7 @@ func (c *Client) DisplaySnapshots(snapshots []*SnapshotMetadata) {
 
 // RestoreSnapshot downloads and restores a snapshot to a local ZFS dataset.
 // If force is true, existing datasets will be overwritten.
-func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
+func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool, allSnapshots []*SnapshotMetadata) error {
 	fmt.Printf("\n=== Restoring Snapshot ===\n")
 	fmt.Printf("Source: %s\n", snapshot.SnapshotID)
 	fmt.Printf("Target: %s\n", targetDataset)
@@ -128,25 +128,82 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
 	fmt.Printf("Compressed: %v\n", snapshot.Compressed)
 	fmt.Printf("Incremental: %v\n\n", snapshot.Incremental)
 
-	// For incremental snapshots, we need special handling
-	if snapshot.Incremental && force {
-		// Check if target dataset exists
-		if _, err := zfs.GetDataset(targetDataset); err == nil {
-			fmt.Printf("→ Destroying existing dataset for incremental restore...\n")
-			// Destroy the existing dataset to allow clean restore
-			cmd := exec.Command("zfs", "destroy", "-r", targetDataset)
-			if err := cmd.Run(); err != nil {
-				fmt.Printf(" Warning: could not destroy dataset (may not exist): %v\n", err)
-			}
+	// For incremental snapshots, we need to restore base first
+	if snapshot.Incremental && snapshot.BaseSnapshot != "" {
+		fmt.Printf("\n⚠ This is an INCREMENTAL backup.\n")
+		fmt.Printf(" Base snapshot needed: %s\n\n", snapshot.BaseSnapshot)
+
+		// Find the base snapshot in the list
+		var baseSnap *SnapshotMetadata
+		for _, s := range allSnapshots {
+			if s.SnapshotID == snapshot.BaseSnapshot {
+				baseSnap = s
+				break
 			}
 		}
 
+		if baseSnap == nil {
+			return fmt.Errorf("base snapshot %s not found on server. Cannot restore incremental without base", snapshot.BaseSnapshot)
+		}
+
+		fmt.Printf("Found base snapshot:\n")
+		fmt.Printf(" - %s from %s (%.2f GB)\n\n",
+			baseSnap.SnapshotID,
+			baseSnap.Timestamp.Format("2006-01-02 15:04:05"),
+			float64(baseSnap.SizeBytes)/(1024*1024*1024))
+
+		fmt.Printf("To restore this incremental, I need to:\n")
+		fmt.Printf(" 1. Restore base snapshot: %s\n", baseSnap.SnapshotID)
+		fmt.Printf(" 2. Apply incremental: %s\n\n", snapshot.SnapshotID)
+
+		// Ask for confirmation
+		fmt.Printf("Continue? [y/N]: ")
+		var confirm string
+		fmt.Scanln(&confirm)
+		if confirm != "y" && confirm != "Y" {
+			fmt.Println("Cancelled.")
+			return nil
+		}
+
+		// First restore the base snapshot
+		fmt.Printf("\n→ Restoring base snapshot...\n")
+		if err := c.restoreOneSnapshot(baseSnap, targetDataset, true); err != nil {
+			return fmt.Errorf("failed to restore base snapshot: %v", err)
+		}
+
+		// Then apply the incremental
+		fmt.Printf("\n→ Applying incremental snapshot...\n")
+		if err := c.restoreOneSnapshot(snapshot, targetDataset, false); err != nil {
+			return fmt.Errorf("failed to apply incremental: %v", err)
+		}
+
+		fmt.Printf("\n✓ Incremental restore completed!\n")
+		return nil
+	}
+
+	return c.restoreOneSnapshot(snapshot, targetDataset, force)
+}
+
+// restoreOneSnapshot downloads and restores a single snapshot
+func (c *Client) restoreOneSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
+	// First, let's try to download - only destroy if download succeeds
+	var originalExists bool
+	if force {
+		if _, err := zfs.GetDataset(targetDataset); err == nil {
+			originalExists = true
+			fmt.Printf("→ Target dataset exists, will overwrite\n")
+		} else {
+			originalExists = false
+			fmt.Printf("→ Target dataset does not exist, will create new\n")
+		}
+	} else {
 		// Check if target dataset exists
 		if !force {
 			if _, err := zfs.GetDataset(targetDataset); err == nil {
 				return fmt.Errorf("target dataset %s already exists. Use --force to overwrite", targetDataset)
 			}
 		}
+	}
 
 	// Request download from server
 	downloadURL := fmt.Sprintf("%s/download?client_id=%s&api_key=%s&snapshot_id=%s",
@@ -165,6 +222,18 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
 		return fmt.Errorf("download failed: %s", body)
 	}
 
+	// Download succeeded - now safe to destroy if needed
+	if force && originalExists {
+		fmt.Printf("→ Destroying existing dataset %s...\n", targetDataset)
+		cmd := exec.Command("zfs", "destroy", "-r", targetDataset)
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			fmt.Printf(" Destroy output: %s\n", string(output))
+			return fmt.Errorf("failed to destroy existing dataset: %v", err)
+		}
+		fmt.Printf(" Destroyed successfully\n")
+	}
+
 	// Create decompression reader if needed
 	var reader io.Reader = resp.Body
 	if snapshot.Compressed {
@@ -194,6 +263,13 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
 	fmt.Printf("\n✓ Snapshot restored successfully!\n")
 	fmt.Printf(" Dataset: %s\n", targetDataset)
 
+	// Verify the dataset exists after restore
+	if _, err := zfs.GetDataset(targetDataset); err == nil {
+		fmt.Printf(" Verified: dataset exists\n")
+	} else {
+		fmt.Printf(" Warning: could not verify dataset exists: %v\n", err)
+	}
+
 	return nil
 }
 
@@ -242,15 +318,22 @@ func (c *Client) RestoreToFile(snapshot *SnapshotMetadata, outputFile string) er
 	return nil
 }
 
-// MountSnapshot mounts a restored dataset to a specified mountpoint.
-// This allows browsing the restored files.
-func (c *Client) MountSnapshot(dataset, mountpoint string) error {
+// MountDataset mounts a restored dataset to a specified mountpoint for file recovery.
+func (c *Client) MountDataset(dataset, mountpoint string) error {
+	fmt.Printf("\n=== Mounting Dataset ===\n")
+	fmt.Printf("Dataset: %s\n", dataset)
+	fmt.Printf("Mountpoint: %s\n\n", mountpoint)
+
 	ds, err := zfs.GetDataset(dataset)
 	if err != nil {
 		return fmt.Errorf("dataset not found: %v", err)
 	}
 
-	// Create mountpoint if it doesn't exist
+	// Check current mountpoint
+	currentMP, _ := ds.GetProperty("mountpoint")
+	fmt.Printf("Current mountpoint: %s\n", currentMP)
+
+	// Create mountpoint directory if it doesn't exist
 	if err := os.MkdirAll(mountpoint, 0755); err != nil {
 		return fmt.Errorf("failed to create mountpoint: %v", err)
 	}
@@ -260,13 +343,17 @@ func (c *Client) MountSnapshot(dataset, mountpoint string) error {
 		return fmt.Errorf("failed to set mountpoint: %v", err)
 	}
 
-	// Mount the dataset
+	// Mount the dataset if not already mounted
 	cmd := exec.Command("zfs", "mount", dataset)
 	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("failed to mount: %v", err)
+		// Might already be mounted, that's OK
+		fmt.Printf(" (dataset may already be mounted)\n")
 	}
 
-	fmt.Printf("✓ Mounted %s at %s\n", dataset, mountpoint)
+	fmt.Printf("\n✓ Mounted successfully!\n")
+	fmt.Printf(" Access files at: %s\n", mountpoint)
+	fmt.Printf(" When done, run: umount %s\n", mountpoint)
+
 	return nil
 }
 
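
The incremental path above boils down to two zfs receive operations against the same target: the full base stream first, then the incremental stream on top of it. A standalone sketch of that sequence (not part of the commit; the file names and dataset are placeholders, and both streams are assumed to have been downloaded already):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// receiveStream feeds a saved `zfs send` stream into `zfs receive`.
// -F rolls the target back to its latest snapshot so the incremental applies cleanly.
func receiveStream(streamPath, targetDataset string) error {
	f, err := os.Open(streamPath)
	if err != nil {
		return err
	}
	defer f.Close()

	cmd := exec.Command("zfs", "receive", "-F", targetDataset)
	cmd.Stdin = f
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	target := "tank/restored" // placeholder dataset

	// 1. Restore the full (base) snapshot.
	if err := receiveStream("base.zfs", target); err != nil {
		fmt.Println("base restore failed:", err)
		return
	}
	// 2. Apply the incremental on top of the restored base.
	if err := receiveStream("incremental.zfs", target); err != nil {
		fmt.Println("incremental restore failed:", err)
		return
	}
	fmt.Println("restore complete")
}
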
@@ -238,6 +238,7 @@ func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
 			Message: "Ready to receive snapshot",
 			UploadMethod: "zfs-receive",
 			StorageKey: snapshotName,
+			UploadURL: fmt.Sprintf("/upload-stream/%s", req.ClientID),
 		})
 	}
 }
@@ -280,12 +281,24 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
 		size = -1 // Use streaming upload for unknown size
 	}
 
-	if s.s3Backend == nil {
-		log.Printf("Error: S3 backend not initialized")
-		http.Error(w, "S3 backend not configured", http.StatusInternalServerError)
+	if s.s3Backend == nil && s.localBackend == nil {
+		log.Printf("Error: No storage backend configured")
+		http.Error(w, "No storage backend configured", http.StatusInternalServerError)
 		return
 	}
 
+	// Determine storage type based on client configuration
+	client, err := s.db.GetClient(clientID)
+	if err != nil || client == nil {
+		http.Error(w, "Client not found", http.StatusNotFound)
+		return
+	}
+
+	var actualSize int64
+
+	// Handle based on storage type
+	if client.StorageType == "s3" && s.s3Backend != nil {
+		// Upload to S3
 	if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
 		log.Printf("Error uploading to S3: %v", err)
 		http.Error(w, "Upload failed", http.StatusInternalServerError)
@@ -293,11 +306,24 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Get actual size after upload
-	actualSize, err := s.s3Backend.GetSize(ctx, storageKey)
+	actualSize, err = s.s3Backend.GetSize(ctx, storageKey)
 	if err != nil {
 		log.Printf("Error getting object size: %v", err)
 		actualSize = size
 	}
+	} else if client.StorageType == "local" && s.localBackend != nil {
+		// Upload to local ZFS
+		if err := s.localBackend.Receive(storageKey, r.Body, compressedStr == "true"); err != nil {
+			log.Printf("Error uploading to local ZFS: %v", err)
+			http.Error(w, "Upload failed", http.StatusInternalServerError)
+			return
+		}
+		actualSize = size
+	} else {
+		log.Printf("Error: Storage type %s not configured", client.StorageType)
+		http.Error(w, "Storage type not configured", http.StatusInternalServerError)
+		return
+	}
 
 	// Save metadata to database
 	metadata := &SnapshotMetadata{
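
Reduced to its essentials, the handler change above routes an incoming stream to one of two backends based on the client's configured storage type. The sketch below restates that dispatch over minimal interfaces; it is illustrative only, and these interface definitions are assumptions covering just the methods used by the handler, not the repository's actual StorageBackend definition.

package sketch

import (
	"context"
	"fmt"
	"io"
)

type s3Uploader interface {
	Upload(ctx context.Context, key string, data io.Reader, size int64) error
}

type localReceiver interface {
	Receive(snapshotName string, data io.Reader, compressed bool) error
}

// storeStream mirrors the branch structure of HandleUploadStream: "s3" clients go
// through the object-store upload, "local" clients go through zfs receive.
func storeStream(ctx context.Context, storageType string, s3 s3Uploader, local localReceiver,
	key string, body io.Reader, size int64, compressed bool) error {
	switch {
	case storageType == "s3" && s3 != nil:
		return s3.Upload(ctx, key, body, size)
	case storageType == "local" && local != nil:
		return local.Receive(key, body, compressed)
	default:
		return fmt.Errorf("storage type %q not configured", storageType)
	}
}
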
@@ -6,12 +6,15 @@ import (
 	"io"
 	"log"
 	"net/http"
+	"os"
 	"os/exec"
+	"strings"
 	"time"
 
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/mistifyio/go-zfs"
+	"github.com/pierrec/lz4/v4"
 )
 
 // StorageBackend defines the interface for different storage types
@@ -138,6 +141,41 @@ func (l *LocalBackend) Upload(ctx context.Context, key string, data io.Reader, s
 	return fmt.Errorf("local backend upload not supported via storage interface, use zfs receive endpoint")
 }
 
+// Receive receives a ZFS snapshot stream and restores it to the local dataset
+func (l *LocalBackend) Receive(snapshotName string, data io.Reader, compressed bool) error {
+	// Extract the target dataset from the snapshot name
+	// snapshotName format: dataset@name -> we want just the dataset part
+	parts := strings.Split(snapshotName, "@")
+	if len(parts) != 2 {
+		return fmt.Errorf("invalid snapshot name format: %s", snapshotName)
+	}
+
+	targetDataset := parts[0]
+
+	log.Printf("Receiving ZFS snapshot to %s (compressed: %v)", targetDataset, compressed)
+
+	// If compressed, decompress with LZ4 first
+	var reader io.Reader = data
+	if compressed {
+		lz4Reader := lz4.NewReader(data)
+		reader = lz4Reader
+	}
+
+	// Use go-zfs library to receive the snapshot (with -F force flag)
+	// Note: The library's ReceiveSnapshot doesn't support -F, so we use exec.Command
+	cmd := exec.Command("zfs", "receive", "-F", snapshotName)
+	cmd.Stdin = reader
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("zfs receive failed: %v", err)
+	}
+
+	log.Printf("Successfully received snapshot: %s", snapshotName)
+	return nil
+}
+
 // Download creates a zfs send stream
 func (l *LocalBackend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
 	cmd := exec.CommandContext(ctx, "zfs", "send", key)
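
LocalBackend.Receive above expects the request body to be a raw `zfs send` stream, optionally wrapped in LZ4 frames. For context, this is a sender-side sketch of producing such a body (not part of the commit): the endpoint path, content type, and snapshot name are placeholders, and the diff does not show how the "compressed" flag is communicated to the server, so that part is omitted.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os/exec"

	"github.com/pierrec/lz4/v4"
)

// sendCompressedSnapshot pipes `zfs send` through an LZ4 writer into an HTTP POST body.
func sendCompressedSnapshot(serverURL, snapshotName string) error {
	cmd := exec.Command("zfs", "send", snapshotName)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}

	pr, pw := io.Pipe()
	go func() {
		lzw := lz4.NewWriter(pw)
		_, copyErr := io.Copy(lzw, stdout)
		if copyErr == nil {
			copyErr = lzw.Close() // flush the final LZ4 frame
		}
		pw.CloseWithError(copyErr)
	}()

	resp, err := http.Post(serverURL+"/upload-stream/client1", "application/octet-stream", pr)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return cmd.Wait()
}

func main() {
	// Placeholder values; the real client derives these from its configuration.
	if err := sendCompressedSnapshot("http://localhost:8080", "tank/data@backup-001"); err != nil {
		fmt.Println("error:", err)
	}
}
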
BIN zfs-client (Binary file not shown)
BIN zfs-server (Executable file; binary not shown)