remove s3 from client
@@ -4,27 +4,24 @@ package client
import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "os"
    "os/exec"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/credentials"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/mistifyio/go-zfs"
    "github.com/pierrec/lz4/v4"
)

// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
    FullBackup bool
    Snapshot *zfs.Dataset
}

// Client handles snapshot backup operations to a remote server.
// It manages creating local ZFS snapshots and transmitting them
// to the backup server via HTTP or SSH.
type Client struct {
    config *Config
}
@@ -34,11 +31,51 @@ func New(config *Config) *Client {
    return &Client{config: config}
}

// CreateAndSend creates a snapshot and sends it to the backup server via HTTP.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
    // Check for existing bookmark to determine backup type
    lastBookmark, err := c.GetLastBookmark()
    if err != nil {
        return nil, fmt.Errorf("failed to check bookmarks: %v", err)
    }

    // Create new snapshot
    snapshot, err := c.CreateSnapshot()
    if err != nil {
        return nil, fmt.Errorf("failed to create snapshot: %v", err)
    }

    isFullBackup := lastBookmark == ""
    if isFullBackup {
        fmt.Println("→ No previous backup found, doing FULL backup...")
        // Send as full (no base)
        if err := c.SendIncrementalHTTP(snapshot, ""); err != nil {
            return nil, fmt.Errorf("failed to send snapshot: %v", err)
        }
    } else {
        fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
        // Send as incremental from bookmark
        if err := c.SendIncrementalHTTP(snapshot, lastBookmark); err != nil {
            return nil, fmt.Errorf("failed to send incremental: %v", err)
        }
    }

    // Create bookmark for future incremental backups
    if err := c.CreateBookmark(snapshot); err != nil {
        fmt.Printf("Warning: failed to create bookmark: %v\n", err)
    }

    return &SnapshotResult{
        FullBackup: isFullBackup,
        Snapshot: snapshot,
    }, nil
}

// CreateSnapshot creates a local ZFS snapshot of the configured dataset.
// The snapshot is named with a timestamp for easy identification.
// Returns the created snapshot dataset or an error.
func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
    // Get the local dataset
    ds, err := zfs.GetDataset(c.config.LocalDataset)
    if err != nil {
        return nil, fmt.Errorf("failed to get dataset: %v", err)
@@ -63,12 +100,14 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
    return int64(snapshot.Used)
}

// SendSnapshot sends a snapshot to the backup server.
// It first requests upload authorization, then streams the snapshot
// using the appropriate method (S3 or ZFS receive).
func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
// SendIncrementalHTTP sends a snapshot to the server via HTTP.
// The server then handles storage (S3 or local ZFS).
func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
    estimatedSize := c.GetSnapshotSize(snapshot)

    // Determine if this is incremental or full
    isIncremental := base != ""

    // Request upload authorization from server
    uploadReq := map[string]interface{}{
        "client_id": c.config.ClientID,
@@ -77,6 +116,8 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
        "timestamp": time.Now().Format(time.RFC3339),
        "compressed": c.config.Compress,
        "estimated_size": estimatedSize,
        "incremental": isIncremental,
        "base_snapshot": base,
    }

    reqBody, _ := json.Marshal(uploadReq)
@@ -86,7 +127,6 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
    }
    defer resp.Body.Close()

    // Parse server response
    var uploadResp struct {
        Success bool `json:"success"`
        Message string `json:"message"`
@@ -107,50 +147,24 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
    fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
    fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)

    // Choose upload method based on server response
    if uploadResp.UploadMethod == "s3" {
        return c.streamToS3(snapshot, uploadResp.UploadURL, uploadResp.StorageKey)
    }
    return c.sendViaZFS(snapshot, uploadResp.StorageKey)
    // Stream to server via HTTP
    return c.streamToServer(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}

// streamToS3 streams a ZFS snapshot to S3 storage using AWS SDK.
// The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
    fmt.Printf("→ Uploading snapshot to S3...\n")

    // Ensure endpoint has valid URI scheme
    endpoint := c.config.S3Endpoint
    if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
        endpoint = "http://" + endpoint
    }

    // Create AWS config
    awsCfg, err := config.LoadDefaultConfig(context.TODO(),
        config.WithRegion(c.config.S3Region),
        config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
            c.config.S3AccessKey,
            c.config.S3SecretKey,
            "",
        )),
    )
    if err != nil {
        return fmt.Errorf("failed to load AWS config: %v", err)
    }

    // Determine if using custom endpoint (non-AWS)
    customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

    // Create S3 client
    s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
        if customEndpoint {
            o.BaseEndpoint = aws.String(endpoint)
            o.UsePathStyle = true // Required for MinIO compatible storage
        }
    })
// streamToServer streams a ZFS snapshot to the backup server via HTTP.
func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
    fmt.Printf("→ Streaming snapshot to server...\n")

    // Create ZFS send command
    cmd := exec.Command("zfs", "send", snapshot.Name)
    var cmd *exec.Cmd
    if base != "" {
        // Incremental send from bookmark or snapshot
        cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
    } else {
        // Full send
        cmd = exec.Command("zfs", "send", snapshot.Name)
    }

    zfsOut, err := cmd.StdoutPipe()
    if err != nil {
        return fmt.Errorf("failed to create pipe: %v", err)
@@ -167,12 +181,10 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
        fmt.Printf(" Compressing with LZ4...\n")
        pr, pw := io.Pipe()
        lz4Writer := lz4.NewWriter(pw)
        lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
        lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks

        go func() {
            // Copy zfs output to LZ4 writer
            io.Copy(lz4Writer, zfsOut)
            // Close LZ4 writer first to flush, then close pipe
            lz4Writer.Close()
            pw.Close()
        }()
@@ -180,113 +192,66 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
        reader = pr
    }

    // Upload to S3 using PutObject
    _, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket: aws.String(c.config.S3Bucket),
        Key: aws.String(storageKey),
        Body: reader,
        ContentType: aws.String("application/octet-stream"),
    })
    // Create HTTP request to server
    req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
    if err != nil {
        return fmt.Errorf("failed to create request: %v", err)
    }

    // Set headers
    req.Header.Set("X-API-Key", c.config.APIKey)
    req.Header.Set("X-Storage-Key", storageKey)
    req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
    req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
    req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
    if base != "" {
        req.Header.Set("X-Base-Snapshot", base)
    }
    req.Header.Set("Content-Type", "application/octet-stream")

    // Send request with no timeout for large uploads
    httpClient := &http.Client{
        Timeout: 0,
    }

    httpResp, err := httpClient.Do(req)
    if err != nil {
        cmd.Process.Kill()
        return fmt.Errorf("failed to upload: %v", err)
    }
    defer httpResp.Body.Close()

    if httpResp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(httpResp.Body)
        return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
    }

    // Wait for zfs send to complete
    if err := cmd.Wait(); err != nil {
        return fmt.Errorf("zfs send failed: %v", err)
    }

    if err != nil {
        return fmt.Errorf("failed to upload to S3: %v", err)
    // Parse response
    var result struct {
        Success bool `json:"success"`
        Message string `json:"message"`
        Size int64 `json:"size"`
    }

    fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
    if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
        return fmt.Errorf("failed to decode response: %v", err)
    }

    if !result.Success {
        return fmt.Errorf("upload failed: %s", result.Message)
    }

    fmt.Printf("✓ Snapshot uploaded successfully!\n")
    fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))

    return nil
}

// sendViaZFS sends a snapshot via traditional ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
    fmt.Printf("→ Sending via ZFS send/receive...\n")

    // Extract server host from URL
    serverHost := c.config.ServerURL
    if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
        serverHost = serverHost[7:]
    } else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
        serverHost = serverHost[8:]
    }

    // Remove port if present
    if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
        serverHost = serverHost[:idx]
    }

    // Execute ZFS send over SSH
    cmd := exec.Command("sh", "-c",
        fmt.Sprintf("zfs send %s | ssh %s 'zfs recv -F %s'",
            snapshot.Name, serverHost, receivePath))

    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to send snapshot: %v", err)
    }

    fmt.Printf("✓ Snapshot sent successfully!\n")
    return nil
}

// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
    FullBackup bool
    Snapshot *zfs.Dataset
}

// CreateAndSend creates a snapshot and sends it to the backup server.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
    // Check for existing bookmark to determine backup type
    lastBookmark, err := c.GetLastBookmark()
    if err != nil {
        return nil, fmt.Errorf("failed to check bookmarks: %v", err)
    }

    // Create new snapshot
    snapshot, err := c.CreateSnapshot()
    if err != nil {
        return nil, fmt.Errorf("failed to create snapshot: %v", err)
    }

    isFullBackup := lastBookmark == ""
    if isFullBackup {
        fmt.Println("→ No previous backup found, doing FULL backup...")
        // Send as full (no base)
        if err := c.SendIncremental(snapshot, ""); err != nil {
            return nil, fmt.Errorf("failed to send snapshot: %v", err)
        }
    } else {
        fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...", lastBookmark)
        // Send as incremental from bookmark
        if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
            return nil, fmt.Errorf("failed to send incremental: %v", err)
        }
    }

    // Create bookmark for future incremental backups
    if err := c.CreateBookmark(snapshot); err != nil {
        fmt.Printf("Warning: failed to create bookmark: %v\n", err)
    }

    return &SnapshotResult{
        FullBackup: isFullBackup,
        Snapshot: snapshot,
    }, nil
}

// GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error {
    url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
        c.config.ServerURL, c.config.ClientID, c.config.APIKey)
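
For context, a minimal usage sketch of the client after this change. It assumes the Config fields referenced in the diff (LocalDataset, ServerURL, ClientID, APIKey, Compress) are exported struct fields and that New, CreateAndSend, and GetStatus keep the signatures shown; the import path and the concrete values are hypothetical and not part of this commit.

package main

import (
    "fmt"
    "log"

    // Hypothetical module path for the client package shown in this diff.
    "example.com/zfsbackup/client"
)

func main() {
    // Field names follow the c.config.* references in the diff; the exact
    // Config definition is not part of this change.
    cfg := &client.Config{
        LocalDataset: "tank/data",
        ServerURL:    "http://backup.example.com:8080",
        ClientID:     "client-01",
        APIKey:       "secret",
        Compress:     true,
    }

    c := client.New(cfg)

    // CreateAndSend picks full vs. incremental based on the last bookmark.
    result, err := c.CreateAndSend()
    if err != nil {
        log.Fatalf("backup failed: %v", err)
    }
    if result.FullBackup {
        fmt.Println("completed a full backup")
    } else {
        fmt.Println("completed an incremental backup")
    }

    // Print storage usage, quota, and snapshot count reported by the server.
    if err := c.GetStatus(); err != nil {
        log.Printf("status check failed: %v", err)
    }
}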