// Package client provides ZFS snapshot backup client functionality.
// This file contains snapshot management functions for creating and sending snapshots.
package client

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/mistifyio/go-zfs"
	"github.com/pierrec/lz4/v4"
)

// CreateBookmark creates a ZFS bookmark from a snapshot.
// Bookmarks allow incremental sends even after the source snapshot is deleted.
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
	// Extract the snapshot name from the full path (dataset@snapshot -> snapshot).
	parts := strings.Split(snapshot.Name, "@")
	if len(parts) != 2 {
		return fmt.Errorf("invalid snapshot name format: %s", snapshot.Name)
	}
	snapshotName := parts[1]
	bookmarkName := fmt.Sprintf("%s#%s", c.config.LocalDataset, snapshotName)

	// Create the bookmark using the zfs command.
	cmd := exec.Command("zfs", "bookmark", snapshot.Name, bookmarkName)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to create bookmark: %v: %s", err, string(output))
	}

	fmt.Printf("✓ Created bookmark: %s\n", bookmarkName)
	return nil
}

// GetLastBookmark returns the most recent bookmark for the dataset.
// Bookmarks are used as the base for incremental sends.
func (c *Client) GetLastBookmark() (string, error) {
	// List all bookmarks for the dataset, sorted by creation time so the
	// last entry is the most recent one.
	cmd := exec.Command("zfs", "list", "-t", "bookmark", "-o", "name", "-s", "creation", "-H", "-r", c.config.LocalDataset)
	output, err := cmd.Output()
	if err != nil {
		return "", nil // Treat failures as "no bookmarks yet" so the caller falls back to another base.
	}

	bookmarks := strings.Split(strings.TrimSpace(string(output)), "\n")
	if len(bookmarks) == 0 || bookmarks[0] == "" {
		return "", nil
	}

	// Return the last bookmark (most recent).
	return bookmarks[len(bookmarks)-1], nil
}

// GetLastSnapshot returns the most recent snapshot for the dataset.
func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
	ds, err := zfs.GetDataset(c.config.LocalDataset)
	if err != nil {
		return nil, fmt.Errorf("failed to get dataset: %v", err)
	}

	snapshots, err := ds.Snapshots()
	if err != nil {
		return nil, fmt.Errorf("failed to list snapshots: %v", err)
	}

	if len(snapshots) == 0 {
		return nil, nil
	}

	// Return the last snapshot (most recent).
	return snapshots[len(snapshots)-1], nil
}
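// resolveSendBase is a hypothetical helper, not part of the original file, sketching
// how the lookups above are intended to compose: prefer the most recent bookmark as
// the incremental base, fall back to the most recent snapshot, and return an empty
// string to request a full send when neither exists.
func (c *Client) resolveSendBase() (string, error) {
	bookmark, err := c.GetLastBookmark()
	if err != nil {
		return "", err
	}
	if bookmark != "" {
		return bookmark, nil
	}

	snap, err := c.GetLastSnapshot()
	if err != nil {
		return "", err
	}
	if snap == nil {
		return "", nil // No prior state: the caller should perform a full send.
	}
	return snap.Name, nil
}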
// SendIncremental sends an incremental stream from a bookmark or snapshot.
// If base is empty, it sends a full stream.
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
	estimatedSize := c.GetSnapshotSize(snapshot)

	// Determine whether this is an incremental or a full send.
	isIncremental := base != ""
	var uploadMethod string
	if isIncremental {
		uploadMethod = "incremental"
	} else {
		uploadMethod = "full"
	}

	// Request upload authorization from the server.
	uploadReq := map[string]interface{}{
		"client_id":      c.config.ClientID,
		"api_key":        c.config.APIKey,
		"dataset_name":   c.config.LocalDataset,
		"timestamp":      time.Now().Format(time.RFC3339),
		"compressed":     c.config.Compress,
		"estimated_size": estimatedSize,
		"incremental":    isIncremental,
		"base_snapshot":  base,
	}

	reqBody, err := json.Marshal(uploadReq)
	if err != nil {
		return fmt.Errorf("failed to marshal upload request: %v", err)
	}

	resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
	if err != nil {
		return fmt.Errorf("failed to request upload: %v", err)
	}
	defer resp.Body.Close()

	var uploadResp struct {
		Success      bool   `json:"success"`
		Message      string `json:"message"`
		UploadURL    string `json:"upload_url"`
		UploadMethod string `json:"upload_method"`
		StorageKey   string `json:"storage_key"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
		return fmt.Errorf("failed to decode response: %v", err)
	}

	if !uploadResp.Success {
		return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
	}

	fmt.Printf("→ Upload authorized\n")
	fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
	fmt.Printf(" Type: %s\n", uploadMethod)
	fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)

	// Choose the upload path based on the server's response.
	if uploadResp.UploadMethod == "s3" {
		return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
	}
	return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}
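// runBackupCycle is an illustrative sketch, not part of the original file, showing how
// SendIncremental and CreateBookmark are meant to be used together: send the new
// snapshot relative to the previous base, then bookmark it so the snapshot itself can
// later be pruned without breaking the incremental chain. It relies on the
// resolveSendBase sketch above; creating the snapshot itself is assumed to happen
// elsewhere in this package.
func (c *Client) runBackupCycle(snapshot *zfs.Dataset) error {
	base, err := c.resolveSendBase()
	if err != nil {
		return fmt.Errorf("failed to determine send base: %v", err)
	}

	if err := c.SendIncremental(snapshot, base); err != nil {
		return err
	}

	// Bookmark the snapshot that was just sent so the next run can use it as a base.
	return c.CreateBookmark(snapshot)
}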
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3 using the AWS SDK.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
	fmt.Printf("→ Uploading snapshot to S3...\n")

	// Ensure the endpoint has a valid URI scheme.
	endpoint := c.config.S3Endpoint
	if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
		endpoint = "http://" + endpoint
	}

	// Create the AWS config with static credentials.
	awsCfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(c.config.S3Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			c.config.S3AccessKey,
			c.config.S3SecretKey,
			"",
		)),
	)
	if err != nil {
		return fmt.Errorf("failed to load AWS config: %v", err)
	}

	// Determine whether a custom (non-AWS) endpoint is in use.
	customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

	// Create the S3 client.
	s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		if customEndpoint {
			o.BaseEndpoint = aws.String(endpoint)
			o.UsePathStyle = true // Required for MinIO-compatible storage
		}
	})

	// Build the ZFS send command.
	var cmd *exec.Cmd
	if base != "" {
		// Incremental send from a bookmark or snapshot.
		fmt.Printf(" Base: %s\n", base)
		cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
	} else {
		// Full send.
		cmd = exec.Command("zfs", "send", snapshot.Name)
	}

	zfsOut, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to create pipe: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start zfs send: %v", err)
	}

	var reader io.Reader = zfsOut

	// Apply LZ4 compression if enabled.
	if c.config.Compress {
		fmt.Printf(" Compressing with LZ4...\n")
		pr, pw := io.Pipe()
		lz4Writer := lz4.NewWriter(pw)
		lz4Writer.Apply(lz4.BlockSizeOption(lz4.Block4Mb)) // 4MB blocks for better throughput

		go func() {
			// Copy the zfs output through the LZ4 writer.
			_, copyErr := io.Copy(lz4Writer, zfsOut)
			// Close the LZ4 writer first to flush its frame, then close the pipe,
			// propagating any copy error to the reading side.
			lz4Writer.Close()
			if copyErr != nil {
				pw.CloseWithError(copyErr)
			} else {
				pw.Close()
			}
		}()
		reader = pr
	}

	// Upload to S3 using PutObject.
	_, uploadErr := s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket:      aws.String(c.config.S3Bucket),
		Key:         aws.String(storageKey),
		Body:        reader,
		ContentType: aws.String("application/octet-stream"),
	})

	// Wait for zfs send to complete.
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("zfs send failed: %v", err)
	}

	if uploadErr != nil {
		return fmt.Errorf("failed to upload to S3: %v", uploadErr)
	}

	fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
	return nil
}
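// decompressForReceive is an illustrative counterpart sketch, not part of the original
// file: whatever later downloads the object from S3 must undo the LZ4 framing applied
// above before piping the stream into `zfs recv`. It is shown here only to document
// the on-the-wire format produced by streamIncrementalToS3 when Compress is enabled.
func decompressForReceive(compressed io.Reader) io.Reader {
	// lz4.NewReader consumes the LZ4 frame and yields the raw zfs send stream.
	return lz4.NewReader(compressed)
}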
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
	fmt.Printf("→ Sending via ZFS send/receive...\n")

	// Extract the server host from the URL.
	serverHost := c.config.ServerURL
	serverHost = strings.TrimPrefix(serverHost, "http://")
	serverHost = strings.TrimPrefix(serverHost, "https://")

	// Remove the port if present.
	if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
		serverHost = serverHost[:idx]
	}

	// Build the zfs send command.
	var zfsSendCmd string
	if base != "" {
		// Incremental send.
		fmt.Printf(" Base: %s\n", base)
		zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
	} else {
		// Full send.
		zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
	}

	// Execute the ZFS send over SSH, piping into zfs recv on the server.
	cmd := exec.Command("sh", "-c", fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to send snapshot: %v", err)
	}

	fmt.Printf("✓ Snapshot sent successfully!\n")
	return nil
}