// Package client provides ZFS snapshot backup client functionality.
|
|
// This file contains snapshot management functions including creation,
|
|
// bookmarking, and rotation similar to zfs-auto-snapshot.
|
|
package client
|
|
|
|
import (
|
|
"bytes"
|
|
"compress/gzip"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"os"
|
|
"os/exec"
|
|
"sort"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/mistifyio/go-zfs"
|
|
)
|
|
|
|
// SnapshotPolicy defines retention settings for automatic snapshots.
// A negative count for a type means "keep all snapshots of that type"
// (see the handling of keepCount in RotateLocalSnapshots).
type SnapshotPolicy struct {
	// KeepHourly is the number of hourly snapshots to keep
	KeepHourly int
	// KeepDaily is the number of daily snapshots to keep
	KeepDaily int
	// KeepWeekly is the number of weekly snapshots to keep
	KeepWeekly int
	// KeepMonthly is the number of monthly snapshots to keep
	KeepMonthly int
}
|
|
|
|
// DefaultPolicy returns the default snapshot retention policy.
|
|
func DefaultPolicy() *SnapshotPolicy {
|
|
return &SnapshotPolicy{
|
|
KeepHourly: 24,
|
|
KeepDaily: 7,
|
|
KeepWeekly: 4,
|
|
KeepMonthly: 12,
|
|
}
|
|
}
|
|
|
|
// SnapshotType represents the type of snapshot (hourly, daily, etc.)
// The label is embedded in snapshot names ("zfs-backup-<type>-<timestamp>")
// and recovered from them by parseSnapshotType.
type SnapshotType string

// Snapshot type labels used in snapshot names and for retention grouping.
const (
	SnapshotHourly  SnapshotType = "hourly"
	SnapshotDaily   SnapshotType = "daily"
	SnapshotWeekly  SnapshotType = "weekly"
	SnapshotMonthly SnapshotType = "monthly"
	// SnapshotManual marks snapshots matching no periodic label; they are
	// exempt from rotation (RotateLocalSnapshots keeps all of them).
	SnapshotManual SnapshotType = "manual"
)
|
|
|
|
// CreateSnapshotWithType creates a snapshot with a specific type label.
|
|
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
|
|
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
|
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get dataset: %v", err)
|
|
}
|
|
|
|
timestamp := time.Now().Format("2006-01-02_15-04-05")
|
|
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
|
|
|
|
snapshot, err := ds.Snapshot(snapshotName, false)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to create snapshot: %v", err)
|
|
}
|
|
|
|
fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
|
|
return snapshot, nil
|
|
}
|
|
|
|
// CreateBookmark creates a ZFS bookmark from a snapshot.
|
|
// Bookmarks allow incremental sends even after the source snapshot is deleted.
|
|
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
|
|
// Extract snapshot name from full path (dataset@snapshot -> snapshot)
|
|
parts := strings.Split(snapshot.Name, "@")
|
|
if len(parts) != 2 {
|
|
return fmt.Errorf("invalid snapshot name format: %s", snapshot.Name)
|
|
}
|
|
snapshotName := parts[1]
|
|
bookmarkName := fmt.Sprintf("%s#%s", c.config.LocalDataset, snapshotName)
|
|
|
|
// Create bookmark using zfs command
|
|
cmd := exec.Command("zfs", "bookmark", snapshot.Name, bookmarkName)
|
|
output, err := cmd.CombinedOutput()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to create bookmark: %v: %s", err, string(output))
|
|
}
|
|
|
|
fmt.Printf("✓ Created bookmark: %s\n", bookmarkName)
|
|
return nil
|
|
}
|
|
|
|
// GetLastBookmark returns the most recent bookmark for the dataset.
|
|
// Bookmarks are used as the base for incremental sends.
|
|
func (c *Client) GetLastBookmark() (string, error) {
|
|
// List all bookmarks for the dataset
|
|
cmd := exec.Command("zfs", "list", "-t", "bookmark", "-o", "name", "-H", "-r", c.config.LocalDataset)
|
|
output, err := cmd.Output()
|
|
if err != nil {
|
|
return "", nil // No bookmarks yet
|
|
}
|
|
|
|
bookmarks := strings.Split(strings.TrimSpace(string(output)), "\n")
|
|
if len(bookmarks) == 0 || bookmarks[0] == "" {
|
|
return "", nil
|
|
}
|
|
|
|
// Return the last bookmark (most recent)
|
|
return bookmarks[len(bookmarks)-1], nil
|
|
}
|
|
|
|
// GetLastSnapshot returns the most recent snapshot for the dataset.
|
|
func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
|
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get dataset: %v", err)
|
|
}
|
|
|
|
snapshots, err := ds.Snapshots()
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to list snapshots: %v", err)
|
|
}
|
|
|
|
if len(snapshots) == 0 {
|
|
return nil, nil
|
|
}
|
|
|
|
// Return the last snapshot (most recent)
|
|
return snapshots[len(snapshots)-1], nil
|
|
}
|
|
|
|
// SendIncremental sends an incremental stream from a bookmark or snapshot.
|
|
// If base is empty, sends a full stream.
|
|
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
|
|
estimatedSize := c.GetSnapshotSize(snapshot)
|
|
|
|
// Determine if this is incremental or full
|
|
isIncremental := base != ""
|
|
var uploadMethod string
|
|
if isIncremental {
|
|
uploadMethod = "incremental"
|
|
} else {
|
|
uploadMethod = "full"
|
|
}
|
|
|
|
// Request upload authorization from server
|
|
uploadReq := map[string]interface{}{
|
|
"client_id": c.config.ClientID,
|
|
"api_key": c.config.APIKey,
|
|
"dataset_name": c.config.LocalDataset,
|
|
"timestamp": time.Now().Format(time.RFC3339),
|
|
"compressed": c.config.Compress,
|
|
"estimated_size": estimatedSize,
|
|
"incremental": isIncremental,
|
|
"base_snapshot": base,
|
|
}
|
|
|
|
reqBody, _ := json.Marshal(uploadReq)
|
|
resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
|
|
if err != nil {
|
|
return fmt.Errorf("failed to request upload: %v", err)
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
var uploadResp struct {
|
|
Success bool `json:"success"`
|
|
Message string `json:"message"`
|
|
UploadURL string `json:"upload_url"`
|
|
UploadMethod string `json:"upload_method"`
|
|
StorageKey string `json:"storage_key"`
|
|
}
|
|
|
|
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
|
|
return fmt.Errorf("failed to decode response: %v", err)
|
|
}
|
|
|
|
if !uploadResp.Success {
|
|
return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
|
|
}
|
|
|
|
fmt.Printf("→ Upload authorized\n")
|
|
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
|
|
fmt.Printf(" Type: %s\n", uploadMethod)
|
|
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)
|
|
|
|
// Choose upload method based on server response
|
|
if uploadResp.UploadMethod == "s3" {
|
|
return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
|
|
}
|
|
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
|
|
}
|
|
|
|
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
|
|
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
|
|
fmt.Printf("→ Streaming snapshot to S3...\n")
|
|
|
|
// Create ZFS send command
|
|
var cmd *exec.Cmd
|
|
if base != "" {
|
|
// Incremental send from bookmark or snapshot
|
|
fmt.Printf(" Base: %s\n", base)
|
|
cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
|
|
} else {
|
|
// Full send
|
|
cmd = exec.Command("zfs", "send", snapshot.Name)
|
|
}
|
|
|
|
zfsOut, err := cmd.StdoutPipe()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to create pipe: %v", err)
|
|
}
|
|
|
|
if err := cmd.Start(); err != nil {
|
|
return fmt.Errorf("failed to start zfs send: %v", err)
|
|
}
|
|
|
|
var reader io.Reader = zfsOut
|
|
|
|
// Apply gzip compression if enabled
|
|
if c.config.Compress {
|
|
fmt.Printf(" Compressing with gzip...\n")
|
|
pr, pw := io.Pipe()
|
|
gzWriter := gzip.NewWriter(pw)
|
|
|
|
go func() {
|
|
// Copy zfs output to gzip writer
|
|
io.Copy(gzWriter, zfsOut)
|
|
// Close gzip writer first to flush footer, then close pipe
|
|
gzWriter.Close()
|
|
pw.Close()
|
|
}()
|
|
|
|
reader = pr
|
|
}
|
|
|
|
// Create HTTP request
|
|
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to create request: %v", err)
|
|
}
|
|
|
|
// Set required headers
|
|
req.Header.Set("X-API-Key", c.config.APIKey)
|
|
req.Header.Set("X-Storage-Key", storageKey)
|
|
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
|
|
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
|
|
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
|
|
if base != "" {
|
|
req.Header.Set("X-Base-Snapshot", base)
|
|
}
|
|
req.Header.Set("Content-Type", "application/octet-stream")
|
|
|
|
// Send request with no timeout for large uploads
|
|
client := &http.Client{
|
|
Timeout: 0,
|
|
}
|
|
|
|
httpResp, err := client.Do(req)
|
|
if err != nil {
|
|
cmd.Process.Kill()
|
|
return fmt.Errorf("failed to upload: %v", err)
|
|
}
|
|
defer httpResp.Body.Close()
|
|
|
|
if httpResp.StatusCode != http.StatusOK {
|
|
body, _ := io.ReadAll(httpResp.Body)
|
|
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
|
|
}
|
|
|
|
if err := cmd.Wait(); err != nil {
|
|
return fmt.Errorf("zfs send failed: %v", err)
|
|
}
|
|
|
|
// Parse response
|
|
var result struct {
|
|
Success bool `json:"success"`
|
|
Message string `json:"message"`
|
|
Size int64 `json:"size"`
|
|
}
|
|
|
|
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
|
|
return fmt.Errorf("failed to decode response: %v", err)
|
|
}
|
|
|
|
if !result.Success {
|
|
return fmt.Errorf("upload failed: %s", result.Message)
|
|
}
|
|
|
|
fmt.Printf("✓ Snapshot uploaded successfully!\n")
|
|
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
|
|
|
|
return nil
|
|
}
|
|
|
|
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
|
|
// This method is used when the server uses local ZFS storage.
|
|
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
|
|
fmt.Printf("-> Sending via ZFS send/receive...\n")
|
|
|
|
// Extract server host from URL
|
|
serverHost := c.config.ServerURL
|
|
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
|
|
serverHost = serverHost[7:]
|
|
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
|
|
serverHost = serverHost[8:]
|
|
}
|
|
|
|
// Remove port if present
|
|
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
|
|
serverHost = serverHost[:idx]
|
|
}
|
|
|
|
// Build zfs send command
|
|
var zfsSendCmd string
|
|
if base != "" {
|
|
// Incremental send
|
|
fmt.Printf(" Base: %s\n", base)
|
|
zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
|
|
} else {
|
|
// Full send
|
|
zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
|
|
}
|
|
|
|
// Execute ZFS send over SSH
|
|
cmd := exec.Command("sh", "-c",
|
|
fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))
|
|
|
|
cmd.Stdout = os.Stdout
|
|
cmd.Stderr = os.Stderr
|
|
|
|
if err := cmd.Run(); err != nil {
|
|
return fmt.Errorf("failed to send snapshot: %v", err)
|
|
}
|
|
|
|
fmt.Printf("Snapshot sent successfully!\n")
|
|
return nil
|
|
}
|
|
|
|
// RotateLocalSnapshots removes old snapshots based on the retention policy.
|
|
// This is similar to zfs-auto-snapshot's rotation behavior.
|
|
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
|
|
ds, err := zfs.GetDataset(c.config.LocalDataset)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to get dataset: %v", err)
|
|
}
|
|
|
|
snapshots, err := ds.Snapshots()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to list snapshots: %v", err)
|
|
}
|
|
|
|
// Group snapshots by type
|
|
groups := make(map[SnapshotType][]*zfs.Dataset)
|
|
for _, snap := range snapshots {
|
|
snapType := parseSnapshotType(snap.Name)
|
|
groups[snapType] = append(groups[snapType], snap)
|
|
}
|
|
|
|
// Apply retention policy
|
|
deletedCount := 0
|
|
keepCount := map[SnapshotType]int{
|
|
SnapshotHourly: policy.KeepHourly,
|
|
SnapshotDaily: policy.KeepDaily,
|
|
SnapshotWeekly: policy.KeepWeekly,
|
|
SnapshotMonthly: policy.KeepMonthly,
|
|
SnapshotManual: -1, // Keep all manual snapshots
|
|
}
|
|
|
|
for snapType, snaps := range groups {
|
|
maxKeep := keepCount[snapType]
|
|
if maxKeep < 0 {
|
|
continue // Keep all
|
|
}
|
|
|
|
// Sort by creation time (oldest first)
|
|
sortSnapshotsByTime(snaps)
|
|
|
|
// Delete oldest snapshots exceeding the limit
|
|
if len(snaps) > maxKeep {
|
|
toDelete := snaps[:len(snaps)-maxKeep]
|
|
for _, snap := range toDelete {
|
|
fmt.Printf(" Deleting old snapshot: %s\n", snap.Name)
|
|
if err := snap.Destroy(zfs.DestroyDefault); err != nil {
|
|
fmt.Printf(" Warning: failed to delete %s: %v\n", snap.Name, err)
|
|
} else {
|
|
deletedCount++
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
if deletedCount > 0 {
|
|
fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// parseSnapshotType extracts the snapshot type from the snapshot name.
|
|
func parseSnapshotType(name string) SnapshotType {
|
|
if strings.Contains(name, "hourly") {
|
|
return SnapshotHourly
|
|
}
|
|
if strings.Contains(name, "daily") {
|
|
return SnapshotDaily
|
|
}
|
|
if strings.Contains(name, "weekly") {
|
|
return SnapshotWeekly
|
|
}
|
|
if strings.Contains(name, "monthly") {
|
|
return SnapshotMonthly
|
|
}
|
|
return SnapshotManual
|
|
}
|
|
|
|
// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
|
|
// Uses the snapshot name which contains timestamp for sorting.
|
|
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
|
|
sort.Slice(snaps, func(i, j int) bool {
|
|
// Extract timestamp from snapshot name for comparison
|
|
// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
|
|
return snaps[i].Name < snaps[j].Name
|
|
})
|
|
}
|