2026-02-14 19:09:05 +01:00
parent 05c916e9a9
commit 5892ac2a2e
13 changed files with 394 additions and 721 deletions

View File

@@ -4,6 +4,7 @@ package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
@@ -13,6 +14,10 @@ import (
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)
@@ -109,10 +114,40 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
return c.sendViaZFS(snapshot, uploadResp.StorageKey)
}
// streamToS3 streams a ZFS snapshot to S3 storage via HTTP.
// streamToS3 streams a ZFS snapshot to S3 storage using AWS SDK.
// The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
fmt.Printf("→ Uploading snapshot to S3...\n")
// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}
// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO-compatible storage
}
})
// Create ZFS send command
cmd := exec.Command("zfs", "send", snapshot.Name)
@@ -145,57 +180,24 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
reader = pr
}
// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}
// Set required headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})
// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}
// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
}
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}
fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
return nil
}
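A note on the PutObject call above: a zfs send pipe is a non-seekable reader of unknown length, and s3.PutObject generally wants either a seekable body or an explicit content length for request signing, so streaming a pipe directly can fail or force buffering depending on the endpoint and transport. A minimal sketch of an alternative using the SDK's multipart uploader, which accepts an arbitrary io.Reader; the helper name and part size here are illustrative and not part of this commit:

// uploadStream is an illustrative helper, not taken from this commit.
// It streams an unbounded reader to S3 via multipart upload.
package client

import (
    "context"
    "io"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func uploadStream(ctx context.Context, s3Client *s3.Client, bucket, key string, body io.Reader) error {
    uploader := manager.NewUploader(s3Client, func(u *manager.Uploader) {
        u.PartSize = 64 * 1024 * 1024 // 64 MiB parts; larger parts mean fewer requests
    })
    _, err := uploader.Upload(ctx, &s3.PutObjectInput{
        Bucket:      aws.String(bucket),
        Key:         aws.String(key),
        Body:        body, // any io.Reader; no Content-Length or seeking required
        ContentType: aws.String("application/octet-stream"),
    })
    return err
}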
@@ -234,6 +236,55 @@ func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
return nil
}
// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}
// CreateAndSend creates a snapshot and sends it to the backup server.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}
// Create new snapshot
snapshot, err := c.CreateSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncremental(snapshot, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}
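For orientation, a caller only needs CreateAndSend and the returned SnapshotResult; a minimal usage sketch, assuming a constructor along the lines of client.New (the constructor name is illustrative, everything else uses the API added above):

// Illustrative caller; assumes "log" is imported and a client.New constructor exists.
c := client.New(client.LoadConfig())
res, err := c.CreateAndSend()
if err != nil {
    log.Fatalf("backup failed: %v", err)
}
kind := "incremental"
if res.FullBackup {
    kind = "full"
}
log.Printf("%s backup of %s sent", kind, res.Snapshot.Name)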
// GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error {
@@ -273,106 +324,3 @@ func (c *Client) GetStatus() error {
return nil
}
// RequestRotation asks the server to rotate old snapshots.
// This deletes the oldest snapshots to free up space.
func (c *Client) RequestRotation() error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
})
resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request rotation: %v", err)
}
defer resp.Body.Close()
var rotateResp struct {
Success bool `json:"success"`
DeletedCount int `json:"deleted_count"`
ReclaimedBytes int64 `json:"reclaimed_bytes"`
}
if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !rotateResp.Success {
return fmt.Errorf("rotation failed")
}
fmt.Printf("✓ Rotation complete\n")
fmt.Printf(" Deleted: %d snapshots\n", rotateResp.DeletedCount)
fmt.Printf(" Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))
return nil
}
// ServerRotationPolicy represents the rotation policy response from the server
type ServerRotationPolicy struct {
Success bool `json:"success"`
Message string `json:"message"`
RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
ServerManaged bool `json:"server_managed"`
}
// GetRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured for this client, it must be used.
// Returns the policy and whether it's server-managed (mandatory).
func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("failed to get rotation policy: %v", err)
}
defer resp.Body.Close()
var policyResp ServerRotationPolicy
if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
return nil, fmt.Errorf("failed to decode response: %v", err)
}
if !policyResp.Success {
return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
}
return &policyResp, nil
}
// ChangePassword changes the client's API key on the server.
// Requires the current API key for authentication and the new key.
func (c *Client) ChangePassword(newAPIKey string) error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"current_key": c.config.APIKey,
"new_key": newAPIKey,
})
resp, err := http.Post(c.config.ServerURL+"/client/change-password", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to change password: %v", err)
}
defer resp.Body.Close()
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("failed to change password: %s", result.Message)
}
// Update local config with new key
c.config.APIKey = newAPIKey
fmt.Printf("✓ Password changed successfully\n")
return nil
}

View File

@@ -21,6 +21,16 @@ type Config struct {
LocalDataset string `json:"local_dataset"`
// Compress enables LZ4 compression for transfers
Compress bool `json:"compress"`
// S3Endpoint is the S3 endpoint URL (optional, for direct S3 uploads)
S3Endpoint string `json:"s3_endpoint"`
// S3Region is the AWS region
S3Region string `json:"s3_region"`
// S3Bucket is the S3 bucket name
S3Bucket string `json:"s3_bucket"`
// S3AccessKey is the AWS access key
S3AccessKey string `json:"s3_access_key"`
// S3SecretKey is the AWS secret key
S3SecretKey string `json:"s3_secret_key"`
}
// LoadConfig loads client configuration from environment variables and .env file.
@@ -35,6 +45,11 @@ func LoadConfig() *Config {
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
Compress: getEnv("COMPRESS", "true") == "true",
S3Endpoint: getEnv("S3_ENDPOINT", ""),
S3Region: getEnv("S3_REGION", "us-east-1"),
S3Bucket: getEnv("S3_BUCKET", "zfs-backups"),
S3AccessKey: getEnv("S3_ACCESS_KEY", ""),
S3SecretKey: getEnv("S3_SECRET_KEY", ""),
}
}
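To make the new fields concrete, this is roughly how a MinIO-backed deployment would feed them into LoadConfig through the environment; the values below are placeholders for illustration, not part of the commit, and would normally come from the .env file:

// Placeholder values for illustration only; assumes "os" is imported.
os.Setenv("S3_ENDPOINT", "minio.internal:9000") // scheme optional; "http://" is prepended if missing
os.Setenv("S3_REGION", "us-east-1")
os.Setenv("S3_BUCKET", "zfs-backups")
os.Setenv("S3_ACCESS_KEY", "minioadmin")
os.Setenv("S3_SECRET_KEY", "minioadmin")
cfg := LoadConfig() // picks up the values above, falling back to the defaults shown in the diff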

View File

@@ -1,77 +1,27 @@
// Package client provides ZFS snapshot backup client functionality.
// This file contains snapshot management functions including creation,
// bookmarking, and rotation similar to zfs-auto-snapshot.
// This file contains snapshot management functions for creating and sending snapshots.
package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)
// SnapshotPolicy defines retention settings for automatic snapshots.
type SnapshotPolicy struct {
// KeepHourly is the number of hourly snapshots to keep
KeepHourly int
// KeepDaily is the number of daily snapshots to keep
KeepDaily int
// KeepWeekly is the number of weekly snapshots to keep
KeepWeekly int
// KeepMonthly is the number of monthly snapshots to keep
KeepMonthly int
}
// DefaultPolicy returns the default snapshot retention policy.
func DefaultPolicy() *SnapshotPolicy {
return &SnapshotPolicy{
KeepHourly: 24,
KeepDaily: 7,
KeepWeekly: 4,
KeepMonthly: 12,
}
}
// SnapshotType represents the type of snapshot (hourly, daily, etc.)
type SnapshotType string
const (
SnapshotHourly SnapshotType = "hourly"
SnapshotDaily SnapshotType = "daily"
SnapshotWeekly SnapshotType = "weekly"
SnapshotMonthly SnapshotType = "monthly"
SnapshotManual SnapshotType = "manual"
)
// CreateSnapshotWithType creates a snapshot with a specific type label.
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
}
timestamp := time.Now().Format("2006-01-02_15-04-05")
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
snapshot, err := ds.Snapshot(snapshotName, false)
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
return snapshot, nil
}
// CreateBookmark creates a ZFS bookmark from a snapshot.
// Bookmarks allow incremental sends even after the source snapshot is deleted.
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
@@ -194,9 +144,39 @@ func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3 using AWS SDK.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
fmt.Printf("→ Uploading snapshot to S3...\n")
// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}
// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO-compatible storage
}
})
// Create ZFS send command
var cmd *exec.Cmd
@@ -238,61 +218,24 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
reader = pr
}
// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}
// Set required headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})
// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}
// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
}
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}
fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
return nil
}
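The hunk above cuts off where the send command is assembled; for reference, an incremental stream is normally produced with zfs send -i <base> <snapshot>, falling back to a full send when base is empty. A minimal sketch of that branch, matching the variable names visible in streamIncrementalToS3 (the exact flags used by this project are not shown in the diff):

// Sketch of the full-vs-incremental send decision; not taken verbatim from this commit.
var cmd *exec.Cmd
if base != "" {
    // Incremental stream relative to the previous bookmark/snapshot.
    cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
} else {
    // No base available: produce a full stream.
    cmd = exec.Command("zfs", "send", snapshot.Name)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
    return fmt.Errorf("failed to create stdout pipe: %v", err)
}
if err := cmd.Start(); err != nil {
    return fmt.Errorf("failed to start zfs send: %v", err)
}
var reader io.Reader = stdout // optionally wrapped with LZ4, as in the diff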
@@ -300,7 +243,7 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
fmt.Printf("-> Sending via ZFS send/receive...\n")
fmt.Printf(" Sending via ZFS send/receive...\n")
// Extract server host from URL
serverHost := c.config.ServerURL
@@ -337,93 +280,6 @@ func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath
return fmt.Errorf("failed to send snapshot: %v", err)
}
fmt.Printf("Snapshot sent successfully!\n")
fmt.Printf("Snapshot sent successfully!\n")
return nil
}
// RotateLocalSnapshots removes old snapshots based on the retention policy.
// This is similar to zfs-auto-snapshot's rotation behavior.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return fmt.Errorf("failed to get dataset: %v", err)
}
snapshots, err := ds.Snapshots()
if err != nil {
return fmt.Errorf("failed to list snapshots: %v", err)
}
// Group snapshots by type
groups := make(map[SnapshotType][]*zfs.Dataset)
for _, snap := range snapshots {
snapType := parseSnapshotType(snap.Name)
groups[snapType] = append(groups[snapType], snap)
}
// Apply retention policy
deletedCount := 0
keepCount := map[SnapshotType]int{
SnapshotHourly: policy.KeepHourly,
SnapshotDaily: policy.KeepDaily,
SnapshotWeekly: policy.KeepWeekly,
SnapshotMonthly: policy.KeepMonthly,
SnapshotManual: -1, // Keep all manual snapshots
}
for snapType, snaps := range groups {
maxKeep := keepCount[snapType]
if maxKeep < 0 {
continue // Keep all
}
// Sort by creation time (oldest first)
sortSnapshotsByTime(snaps)
// Delete oldest snapshots exceeding the limit
if len(snaps) > maxKeep {
toDelete := snaps[:len(snaps)-maxKeep]
for _, snap := range toDelete {
fmt.Printf(" Deleting old snapshot: %s\n", snap.Name)
if err := snap.Destroy(zfs.DestroyDefault); err != nil {
fmt.Printf(" Warning: failed to delete %s: %v\n", snap.Name, err)
} else {
deletedCount++
}
}
}
}
if deletedCount > 0 {
fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
}
return nil
}
// parseSnapshotType extracts the snapshot type from the snapshot name.
func parseSnapshotType(name string) SnapshotType {
if strings.Contains(name, "hourly") {
return SnapshotHourly
}
if strings.Contains(name, "daily") {
return SnapshotDaily
}
if strings.Contains(name, "weekly") {
return SnapshotWeekly
}
if strings.Contains(name, "monthly") {
return SnapshotMonthly
}
return SnapshotManual
}
// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
// Uses the snapshot name which contains timestamp for sorting.
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
sort.Slice(snaps, func(i, j int) bool {
// Extract timestamp from snapshot name for comparison
// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
return snaps[i].Name < snaps[j].Name
})
}