simplify
@@ -4,6 +4,7 @@ package client

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
@@ -13,6 +14,10 @@ import (
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)
@@ -109,10 +114,40 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
return c.sendViaZFS(snapshot, uploadResp.StorageKey)
}

// streamToS3 streams a ZFS snapshot to S3 storage via HTTP.
// streamToS3 streams a ZFS snapshot to S3 storage using AWS SDK.
// The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
fmt.Printf("→ Uploading snapshot to S3...\n")

// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}

// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}

// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})

// Create ZFS send command
cmd := exec.Command("zfs", "send", snapshot.Name)
@@ -145,57 +180,24 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
reader = pr
}

// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}

// Set required headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("Content-Type", "application/octet-stream")

// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}

httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()

if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})

// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}

// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
}

if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}

fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")

return nil
}
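Note: the hunk above elides the pipe setup that produces `reader` when compression is enabled. A minimal sketch of how such a pipe is typically wired with the imported pierrec/lz4 package follows; the helper name `newSendReader` and its parameters are illustrative, not the code hidden by the hunk.

// newSendReader is an illustrative helper (not part of this commit): it starts
// "zfs send <name>", optionally wraps the stream in an LZ4 writer via io.Pipe,
// and returns a reader suitable for s3.PutObjectInput.Body.
func newSendReader(snapshotName string, compress bool) (io.Reader, *exec.Cmd, error) {
	cmd := exec.Command("zfs", "send", snapshotName)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}
	pr, pw := io.Pipe()
	go func() {
		var dst io.Writer = pw
		var lzw *lz4.Writer
		if compress {
			lzw = lz4.NewWriter(pw)
			dst = lzw
		}
		_, copyErr := io.Copy(dst, stdout)
		if lzw != nil {
			lzw.Close() // flush remaining LZ4 frames before closing the pipe
		}
		pw.CloseWithError(copyErr)
	}()
	return pr, cmd, nil
}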
@@ -234,6 +236,55 @@ func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
return nil
}

// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}

// CreateAndSend creates a snapshot and sends it to the backup server.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}

// Create new snapshot
snapshot, err := c.CreateSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}

isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncremental(snapshot, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}

// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}

return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}
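For context, a hypothetical caller of CreateAndSend might look like the sketch below; the wrapper function and its logging are illustrative, not part of this commit.

// runBackup is an illustrative caller (not part of this commit).
func runBackup(c *Client) error {
	result, err := c.CreateAndSend()
	if err != nil {
		return err
	}
	if result.FullBackup {
		fmt.Printf("✓ Full backup of %s completed\n", result.Snapshot.Name)
	} else {
		fmt.Printf("✓ Incremental backup of %s completed\n", result.Snapshot.Name)
	}
	return nil
}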

// GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error {
@@ -273,106 +324,3 @@ func (c *Client) GetStatus() error {

return nil
}

// RequestRotation asks the server to rotate old snapshots.
// This deletes the oldest snapshots to free up space.
func (c *Client) RequestRotation() error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
})

resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request rotation: %v", err)
}
defer resp.Body.Close()

var rotateResp struct {
Success bool `json:"success"`
DeletedCount int `json:"deleted_count"`
ReclaimedBytes int64 `json:"reclaimed_bytes"`
}

if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !rotateResp.Success {
return fmt.Errorf("rotation failed")
}

fmt.Printf("✓ Rotation complete\n")
fmt.Printf(" Deleted: %d snapshots\n", rotateResp.DeletedCount)
fmt.Printf(" Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))

return nil
}

// ServerRotationPolicy represents the rotation policy response from the server
type ServerRotationPolicy struct {
Success bool `json:"success"`
Message string `json:"message"`
RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
ServerManaged bool `json:"server_managed"`
}

// GetRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured for this client, it must be used.
// Returns the policy and whether it's server-managed (mandatory).
func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)

resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("failed to get rotation policy: %v", err)
}
defer resp.Body.Close()

var policyResp ServerRotationPolicy
if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
return nil, fmt.Errorf("failed to decode response: %v", err)
}

if !policyResp.Success {
return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
}

return &policyResp, nil
}

// ChangePassword changes the client's API key on the server.
// Requires the current API key for authentication and the new key.
func (c *Client) ChangePassword(newAPIKey string) error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"current_key": c.config.APIKey,
"new_key": newAPIKey,
})

resp, err := http.Post(c.config.ServerURL+"/client/change-password", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to change password: %v", err)
}
defer resp.Body.Close()

var result struct {
Success bool `json:"success"`
Message string `json:"message"`
}

if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !result.Success {
return fmt.Errorf("failed to change password: %s", result.Message)
}

// Update local config with new key
c.config.APIKey = newAPIKey

fmt.Printf("✓ Password changed successfully\n")
return nil
}

@@ -21,6 +21,16 @@ type Config struct {
LocalDataset string `json:"local_dataset"`
// Compress enables LZ4 compression for transfers
Compress bool `json:"compress"`
// S3Endpoint is the S3 endpoint URL (optional, for direct S3 uploads)
S3Endpoint string `json:"s3_endpoint"`
// S3Region is the AWS region
S3Region string `json:"s3_region"`
// S3Bucket is the S3 bucket name
S3Bucket string `json:"s3_bucket"`
// S3AccessKey is the AWS access key
S3AccessKey string `json:"s3_access_key"`
// S3SecretKey is the AWS secret key
S3SecretKey string `json:"s3_secret_key"`
}

// LoadConfig loads client configuration from environment variables and .env file.
@@ -35,6 +45,11 @@ func LoadConfig() *Config {
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
Compress: getEnv("COMPRESS", "true") == "true",
S3Endpoint: getEnv("S3_ENDPOINT", ""),
S3Region: getEnv("S3_REGION", "us-east-1"),
S3Bucket: getEnv("S3_BUCKET", "zfs-backups"),
S3AccessKey: getEnv("S3_ACCESS_KEY", ""),
S3SecretKey: getEnv("S3_SECRET_KEY", ""),
}
}

@@ -1,77 +1,27 @@
// Package client provides ZFS snapshot backup client functionality.
// This file contains snapshot management functions including creation,
// bookmarking, and rotation similar to zfs-auto-snapshot.
// This file contains snapshot management functions for creating and sending snapshots.
package client

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)

// SnapshotPolicy defines retention settings for automatic snapshots.
type SnapshotPolicy struct {
// KeepHourly is the number of hourly snapshots to keep
KeepHourly int
// KeepDaily is the number of daily snapshots to keep
KeepDaily int
// KeepWeekly is the number of weekly snapshots to keep
KeepWeekly int
// KeepMonthly is the number of monthly snapshots to keep
KeepMonthly int
}

// DefaultPolicy returns the default snapshot retention policy.
func DefaultPolicy() *SnapshotPolicy {
return &SnapshotPolicy{
KeepHourly: 24,
KeepDaily: 7,
KeepWeekly: 4,
KeepMonthly: 12,
}
}

// SnapshotType represents the type of snapshot (hourly, daily, etc.)
type SnapshotType string

const (
SnapshotHourly SnapshotType = "hourly"
SnapshotDaily SnapshotType = "daily"
SnapshotWeekly SnapshotType = "weekly"
SnapshotMonthly SnapshotType = "monthly"
SnapshotManual SnapshotType = "manual"
)

// CreateSnapshotWithType creates a snapshot with a specific type label.
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
}

timestamp := time.Now().Format("2006-01-02_15-04-05")
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)

snapshot, err := ds.Snapshot(snapshotName, false)
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}

fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
return snapshot, nil
}

// CreateBookmark creates a ZFS bookmark from a snapshot.
// Bookmarks allow incremental sends even after the source snapshot is deleted.
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
@@ -194,9 +144,39 @@ func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}

// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3 using AWS SDK.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
fmt.Printf("→ Uploading snapshot to S3...\n")

// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}

// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}

// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})

// Create ZFS send command
var cmd *exec.Cmd
@@ -238,61 +218,24 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
reader = pr
}

// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}

// Set required headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")

// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}

httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()

if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})

// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}

// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
}

if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}

fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")

return nil
}
@@ -300,7 +243,7 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
fmt.Printf("-> Sending via ZFS send/receive...\n")
fmt.Printf("→ Sending via ZFS send/receive...\n")

// Extract server host from URL
serverHost := c.config.ServerURL
@@ -337,93 +280,6 @@ func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath
return fmt.Errorf("failed to send snapshot: %v", err)
}

fmt.Printf("Snapshot sent successfully!\n")
fmt.Printf("✓ Snapshot sent successfully!\n")
return nil
}

// RotateLocalSnapshots removes old snapshots based on the retention policy.
// This is similar to zfs-auto-snapshot's rotation behavior.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return fmt.Errorf("failed to get dataset: %v", err)
}

snapshots, err := ds.Snapshots()
if err != nil {
return fmt.Errorf("failed to list snapshots: %v", err)
}

// Group snapshots by type
groups := make(map[SnapshotType][]*zfs.Dataset)
for _, snap := range snapshots {
snapType := parseSnapshotType(snap.Name)
groups[snapType] = append(groups[snapType], snap)
}

// Apply retention policy
deletedCount := 0
keepCount := map[SnapshotType]int{
SnapshotHourly: policy.KeepHourly,
SnapshotDaily: policy.KeepDaily,
SnapshotWeekly: policy.KeepWeekly,
SnapshotMonthly: policy.KeepMonthly,
SnapshotManual: -1, // Keep all manual snapshots
}

for snapType, snaps := range groups {
maxKeep := keepCount[snapType]
if maxKeep < 0 {
continue // Keep all
}

// Sort by creation time (oldest first)
sortSnapshotsByTime(snaps)

// Delete oldest snapshots exceeding the limit
if len(snaps) > maxKeep {
toDelete := snaps[:len(snaps)-maxKeep]
for _, snap := range toDelete {
fmt.Printf(" Deleting old snapshot: %s\n", snap.Name)
if err := snap.Destroy(zfs.DestroyDefault); err != nil {
fmt.Printf(" Warning: failed to delete %s: %v\n", snap.Name, err)
} else {
deletedCount++
}
}
}
}

if deletedCount > 0 {
fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
}

return nil
}

// parseSnapshotType extracts the snapshot type from the snapshot name.
func parseSnapshotType(name string) SnapshotType {
if strings.Contains(name, "hourly") {
return SnapshotHourly
}
if strings.Contains(name, "daily") {
return SnapshotDaily
}
if strings.Contains(name, "weekly") {
return SnapshotWeekly
}
if strings.Contains(name, "monthly") {
return SnapshotMonthly
}
return SnapshotManual
}

// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
// Uses the snapshot name which contains timestamp for sorting.
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
sort.Slice(snaps, func(i, j int) bool {
// Extract timestamp from snapshot name for comparison
// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
return snaps[i].Name < snaps[j].Name
})
}
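A hypothetical caller tying the pieces above together — preferring a server-managed policy from GetRotationPolicy and falling back to DefaultPolicy — could look like this sketch; the helper is illustrative and not part of this commit.

// rotateWithPolicy is an illustrative helper (not part of this commit).
func rotateWithPolicy(c *Client) error {
	policy := DefaultPolicy()
	if sp, err := c.GetRotationPolicy(); err == nil && sp.ServerManaged && sp.RotationPolicy != nil {
		policy = sp.RotationPolicy // server-managed policies are mandatory
	}
	return c.RotateLocalSnapshots(policy)
}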

@@ -13,7 +13,8 @@ type Config struct {
S3SecretKey string
S3BucketName string
S3UseSSL bool
S3Enabled bool // Enable/disable S3 backend
S3Enabled bool // Enable/disable S3 backend
S3Region string // AWS region
BaseDataset string
DatabasePath string // Path to SQLite database
Port string
@@ -40,6 +41,7 @@ func LoadConfig() *Config {
S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
S3UseSSL: getEnv("S3_USE_SSL", "true") != "false",
S3Enabled: s3Enabled,
S3Region: getEnv("S3_REGION", "us-east-1"),
BaseDataset: getEnv("ZFS_BASE_DATASET", "backup"),
DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"),
Port: getEnv("PORT", "8080"),

@@ -7,10 +7,12 @@ import (
"log"
"net/http"
"os/exec"
"time"
"strings"

"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
)

@@ -23,95 +25,119 @@ type StorageBackend interface {
GetSize(ctx context.Context, key string) (int64, error)
}

// S3Backend implements StorageBackend for S3-compatible storage
// S3Backend implements StorageBackend for S3-compatible storage using AWS SDK v2
type S3Backend struct {
client *minio.Client
client *s3.Client
bucketName string
}

// NewS3Backend creates a new S3 storage backend
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
// Create custom HTTP transport with extended timeouts for large file uploads
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
// Extended timeouts for streaming large ZFS snapshots
ResponseHeaderTimeout: 5 * time.Minute,
ExpectContinueTimeout: 30 * time.Second,
IdleConnTimeout: 90 * time.Second,
// Connection pooling
MaxIdleConns: 10,
MaxIdleConnsPerHost: 10,
DisableCompression: false,
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool, region string) (*S3Backend, error) {
// Ensure endpoint has valid URI scheme
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
if useSSL {
endpoint = "https://" + endpoint
} else {
endpoint = "http://" + endpoint
}
}

client, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: useSSL,
Transport: transport,
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "https://s3.amazonaws.com" && endpoint != "http://s3.amazonaws.com"

// Load AWS config
awsCfg, err := config.LoadDefaultConfig(context.Background(),
config.WithRegion(region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
)
if err != nil {
return nil, fmt.Errorf("failed to load AWS config: %v", err)
}

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO and other S3-compatible storage
}
// Set HTTP client with extended timeout for large uploads
o.HTTPClient = &http.Client{
Timeout: 0, // No timeout for large file uploads
}
})

// Check if bucket exists (or create it for AWS S3)
ctx := context.Background()
_, err = s3Client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
return nil, fmt.Errorf("failed to create S3 client: %v", err)
}

// Ensure bucket exists
ctx := context.Background()
exists, err := client.BucketExists(ctx, bucketName)
if err != nil {
return nil, fmt.Errorf("failed to check bucket: %v", err)
}

if !exists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
// Try to create bucket
_, err = s3Client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
return nil, fmt.Errorf("failed to create bucket: %v", err)
log.Printf("Warning: failed to create bucket: %v", err)
} else {
log.Printf("Created S3 bucket: %s", bucketName)
}
log.Printf("Created S3 bucket: %s", bucketName)
}

return &S3Backend{
client: client,
client: s3Client,
bucketName: bucketName,
}, nil
}

// Upload uploads data to S3
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
minio.PutObjectOptions{
ContentType: "application/octet-stream",
PartSize: 10 * 1024 * 1024, // 10MB parts
})
_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
Body: data,
ContentType: aws.String("application/octet-stream"),
})
return err
}

// Download retrieves data from S3
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
if err != nil {
return nil, err
}
return obj, nil
return resp.Body, nil
}

// Delete removes an object from S3
func (s *S3Backend) Delete(ctx context.Context, key string) error {
return s.client.RemoveObject(ctx, s.bucketName, key, minio.RemoveObjectOptions{})
_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
return err
}

// List returns all objects with the given prefix
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
var keys []string

objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: true,
paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucketName),
Prefix: aws.String(prefix),
})

for object := range objectCh {
if object.Err != nil {
return nil, object.Err
for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
return nil, err
}
for _, obj := range page.Contents {
keys = append(keys, *obj.Key)
}
keys = append(keys, object.Key)
}

return keys, nil
@@ -119,11 +145,14 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {

// GetSize returns the size of an object in S3
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
info, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{})
info, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
if err != nil {
return 0, err
}
return info.Size, nil
return *info.ContentLength, nil
}
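For reference, wiring the updated NewS3Backend signature (which now takes a region) from the server config might look like the sketch below. It assumes the server's Config and S3Backend live in the same package; the fields cfg.S3Endpoint and cfg.S3AccessKey do not appear in the hunks shown here and are assumed names.

// newBackendFromConfig is an illustrative helper (not part of this commit).
func newBackendFromConfig(cfg *Config) (*S3Backend, error) {
	return NewS3Backend(
		cfg.S3Endpoint,  // assumed field name
		cfg.S3AccessKey, // assumed field name
		cfg.S3SecretKey,
		cfg.S3BucketName,
		cfg.S3UseSSL,
		cfg.S3Region,
	)
}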

// LocalBackend implements StorageBackend for local ZFS storage

@@ -260,4 +260,5 @@ templ ClientPasswordModal() {
// AdminScripts renders the JavaScript for the admin panel
templ AdminScripts() {
<script src="/admin/static/admin.js"></script>
<script>initTheme();</script>
}

@@ -19,6 +19,30 @@ async function logout() {
location.reload();
}

// Toggle dark/light theme
function toggleTheme() {
const html = document.documentElement;
const isDark = html.classList.contains('dark');

if (isDark) {
html.classList.remove('dark');
localStorage.setItem('theme', 'light');
} else {
html.classList.add('dark');
localStorage.setItem('theme', 'dark');
}
}

// Initialize theme on load
function initTheme() {
const savedTheme = localStorage.getItem('theme');
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;

if (savedTheme === 'dark' || (!savedTheme && prefersDark)) {
document.documentElement.classList.add('dark');
}
}

// Load stats
async function loadStats() {
try {