server client restore

This commit is contained in:
2026-02-13 14:59:43 +01:00
commit 20e90ec240
17 changed files with 3185 additions and 0 deletions

95
internal/server/config.go Normal file
View File

@@ -0,0 +1,95 @@
package server
import (
	"bufio"
	"os"
	"strconv"
	"strings"
)
// Config holds application configuration from environment and .env file.
// Values are resolved by LoadConfig: the .env file is loaded first, but
// variables already present in the process environment take precedence.
type Config struct {
// S3Endpoint is the S3-compatible host (default "s3.amazonaws.com").
S3Endpoint string
// S3AccessKey / S3SecretKey are the S3 credentials; when either is
// empty, LoadConfig forces S3Enabled to false.
S3AccessKey string
S3SecretKey string
// S3BucketName is the bucket used for snapshot objects.
S3BucketName string
// S3UseSSL selects HTTPS for the S3 endpoint.
S3UseSSL bool
S3Enabled bool // Enable/disable S3 backend
// BaseDataset is the root ZFS dataset for local storage.
BaseDataset string
// ConfigFile is the path of the clients JSON file.
ConfigFile string
// MetadataFile is the path of the snapshot-metadata JSON file.
MetadataFile string
// Port is the HTTP listen port.
Port string
}
// LoadConfig loads configuration from the optional .env file and the
// process environment. Values already present in the environment take
// precedence over the .env file.
func LoadConfig() *Config {
	// Load .env file if it exists (a missing file is not an error).
	loadEnvFile(".env")

	// The original code parsed S3_ENABLED with `== "true"` but
	// S3_USE_SSL with `!= "false"`; both now use ParseBool semantics.
	s3Enabled := getEnvBool("S3_ENABLED", true)
	s3AccessKey := os.Getenv("S3_ACCESS_KEY")
	s3SecretKey := os.Getenv("S3_SECRET_KEY")
	// S3 cannot work without credentials, so force-disable it.
	if s3AccessKey == "" || s3SecretKey == "" {
		s3Enabled = false
	}
	return &Config{
		S3Endpoint:   getEnv("S3_ENDPOINT", "s3.amazonaws.com"),
		S3AccessKey:  s3AccessKey,
		S3SecretKey:  s3SecretKey,
		S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
		S3UseSSL:     getEnvBool("S3_USE_SSL", true),
		S3Enabled:    s3Enabled,
		BaseDataset:  getEnv("ZFS_BASE_DATASET", "backup"),
		ConfigFile:   getEnv("CONFIG_FILE", "clients.json"),
		MetadataFile: getEnv("METADATA_FILE", "metadata.json"),
		Port:         getEnv("PORT", "8080"),
	}
}

// getEnvBool parses an environment variable as a boolean using
// strconv.ParseBool semantics ("1", "t", "TRUE", "0", "f", "false", ...).
// Unset or unparsable values fall back to defaultValue.
func getEnvBool(key string, defaultValue bool) bool {
	v, err := strconv.ParseBool(os.Getenv(key))
	if err != nil {
		return defaultValue
	}
	return v
}
// loadEnvFile loads key=value pairs from a .env file into the process
// environment. A missing file is silently ignored (.env is optional).
// Variables already set in the real environment are never overwritten.
func loadEnvFile(filename string) {
	file, err := os.Open(filename)
	if err != nil {
		return // .env file is optional
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		key, value, ok := parseEnvLine(scanner.Text())
		if !ok {
			continue
		}
		// The real environment takes precedence over the .env file.
		if os.Getenv(key) == "" {
			os.Setenv(key, value)
		}
	}
}

// parseEnvLine parses a single .env line into a key/value pair.
// ok is false for blank lines, comments, and lines without '='.
// A matching pair of surrounding quotes (single or double) is stripped
// from the value.
func parseEnvLine(line string) (key, value string, ok bool) {
	line = strings.TrimSpace(line)
	// Skip empty lines and comments.
	if line == "" || strings.HasPrefix(line, "#") {
		return "", "", false
	}
	k, v, found := strings.Cut(line, "=")
	if !found {
		return "", "", false
	}
	key = strings.TrimSpace(k)
	value = strings.TrimSpace(v)
	// Strip quotes only when the value is wrapped in a MATCHING pair.
	// The original checked only the first character and unconditionally
	// removed the last one, so `"abc` was mangled into `ab`.
	if len(value) >= 2 {
		first, last := value[0], value[len(value)-1]
		if first == last && (first == '"' || first == '\'') {
			value = value[1 : len(value)-1]
		}
	}
	return key, value, true
}
// getEnv returns the value of the environment variable key, or
// defaultValue when the variable is unset or empty.
func getEnv(key, defaultValue string) string {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	return value
}

583
internal/server/server.go Normal file
View File

@@ -0,0 +1,583 @@
package server
import (
	"context"
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path"
	"sort"
	"strings"
	"sync"
	"time"
)
// Server manages snapshots from multiple clients with S3 support.
// All access to the clients and snapshots maps must hold mu.
type Server struct {
// clients maps client ID to its configuration; guarded by mu.
clients map[string]*ClientConfig
// snapshots maps client ID to its recorded snapshot metadata; guarded by mu.
snapshots map[string][]*SnapshotMetadata
// mu guards clients and snapshots.
mu sync.RWMutex
// s3Backend serves clients with StorageType "s3".
// NOTE(review): looks like it may be nil when S3 is disabled — confirm
// what main passes in before dereferencing unconditionally.
s3Backend *S3Backend
// localBackend serves clients with any other StorageType.
localBackend *LocalBackend
// metadataFile is the JSON file snapshot metadata is persisted to.
metadataFile string
// configFile is the JSON file client configs are persisted to.
configFile string
}
// New constructs a Server wired to the given storage backends and
// immediately loads client configuration and snapshot metadata from disk.
func New(configFile, metadataFile string, s3Backend *S3Backend, localBackend *LocalBackend) *Server {
	srv := &Server{
		clients:      map[string]*ClientConfig{},
		snapshots:    map[string][]*SnapshotMetadata{},
		s3Backend:    s3Backend,
		localBackend: localBackend,
		metadataFile: metadataFile,
		configFile:   configFile,
	}
	srv.loadConfig()
	srv.loadMetadata()
	return srv
}
// loadConfig reads client configurations from s.configFile. If the file
// cannot be read, a default client is created and persisted so a fresh
// install remains usable.
func (s *Server) loadConfig() {
	data, err := os.ReadFile(s.configFile)
	if err != nil {
		log.Printf("Warning: Could not read config file: %v", err)
		// Create default config so a fresh install is usable.
		s.mu.Lock()
		s.clients["client1"] = &ClientConfig{
			ClientID:     "client1",
			APIKey:       hashAPIKey("secret123"),
			MaxSizeBytes: 100 * 1024 * 1024 * 1024, // 100 GiB quota
			Dataset:      "backup/client1",
			Enabled:      true,
			StorageType:  "s3",
		}
		s.mu.Unlock()
		// The default key is hardcoded in source: make the operator notice.
		log.Printf("WARNING: created default client %q with a well-known API key; change it before exposing the server", "client1")
		// saveConfig takes s.mu.RLock itself, so it must run only after
		// the write lock above is released (sync.RWMutex is not reentrant).
		s.saveConfig()
		return
	}
	var clients []*ClientConfig
	if err := json.Unmarshal(data, &clients); err != nil {
		log.Printf("Error parsing config: %v", err)
		return
	}
	// Guard the map mutation: loadConfig runs at startup today, but the
	// original wrote s.clients with no lock at all, which becomes a data
	// race the moment this is ever re-invoked at runtime.
	s.mu.Lock()
	for _, client := range clients {
		s.clients[client.ClientID] = client
	}
	count := len(s.clients)
	s.mu.Unlock()
	log.Printf("Loaded %d client configurations", count)
}
// saveConfig persists the current client configurations to s.configFile
// as indented JSON. Errors are logged, not returned.
func (s *Server) saveConfig() {
	s.mu.RLock()
	defer s.mu.RUnlock()
	clients := make([]*ClientConfig, 0, len(s.clients))
	for _, c := range s.clients {
		clients = append(clients, c)
	}
	data, err := json.MarshalIndent(clients, "", " ")
	if err != nil {
		log.Printf("Error marshaling config: %v", err)
		return
	}
	if writeErr := os.WriteFile(s.configFile, data, 0600); writeErr != nil {
		log.Printf("Error writing config: %v", writeErr)
	}
}
// loadMetadata restores the snapshot index from s.metadataFile.
// A missing or unreadable file simply means a fresh start.
func (s *Server) loadMetadata() {
	data, err := os.ReadFile(s.metadataFile)
	if err != nil {
		log.Printf("No existing metadata file, starting fresh")
		return
	}
	if err = json.Unmarshal(data, &s.snapshots); err != nil {
		log.Printf("Error parsing metadata: %v", err)
		return
	}
	count := 0
	for _, snaps := range s.snapshots {
		count += len(snaps)
	}
	log.Printf("Loaded metadata for %d snapshots", count)
}
// saveMetadata writes the in-memory snapshot index to s.metadataFile as
// indented JSON. Persistence is best-effort: errors are only logged.
func (s *Server) saveMetadata() {
	s.mu.RLock()
	defer s.mu.RUnlock()
	payload, err := json.MarshalIndent(s.snapshots, "", " ")
	if err != nil {
		log.Printf("Error marshaling metadata: %v", err)
		return
	}
	if writeErr := os.WriteFile(s.metadataFile, payload, 0600); writeErr != nil {
		log.Printf("Error writing metadata: %v", writeErr)
	}
}
// authenticate verifies a client's API key against the stored SHA-256
// hash. Unknown and disabled clients are rejected.
func (s *Server) authenticate(clientID, apiKey string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	client, exists := s.clients[clientID]
	if !exists || !client.Enabled {
		return false
	}
	// Compare digests in constant time so the comparison does not leak
	// how many leading bytes of the stored hash matched.
	return subtle.ConstantTimeCompare([]byte(client.APIKey), []byte(hashAPIKey(apiKey))) == 1
}
// getClientUsage returns the total recorded size in bytes of all
// snapshots stored for the given client.
func (s *Server) getClientUsage(clientID string) int64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	total := int64(0)
	for _, snapshot := range s.snapshots[clientID] {
		total += snapshot.SizeBytes
	}
	return total
}
// canAcceptSnapshot reports whether accepting estimatedSize more bytes
// would keep the client within its quota. The second return value is a
// human-readable reason when the answer is false.
func (s *Server) canAcceptSnapshot(clientID string, estimatedSize int64) (bool, string) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	client, exists := s.clients[clientID]
	if !exists {
		return false, "Client not found"
	}
	// Compute usage inline instead of calling getClientUsage: that method
	// takes s.mu.RLock again, and a recursive RLock deadlocks whenever a
	// writer is blocked between the two acquisitions (sync.RWMutex rule).
	var currentUsage int64
	for _, snap := range s.snapshots[clientID] {
		currentUsage += snap.SizeBytes
	}
	if currentUsage+estimatedSize > client.MaxSizeBytes {
		return false, fmt.Sprintf("Quota exceeded: using %d/%d bytes",
			currentUsage, client.MaxSizeBytes)
	}
	return true, "OK"
}
// rotateSnapshots deletes the client's oldest snapshots until the
// recorded usage fits within its quota, always keeping at least one.
// Returns the number of snapshots deleted and the bytes reclaimed.
// Metadata is updated under the lock first; storage deletion happens
// afterwards without the lock so slow backend I/O cannot block readers.
// NOTE(review): if backend.Delete fails, the metadata entry has already
// been dropped, so the object is orphaned in storage — confirm this
// trade-off is intended.
func (s *Server) rotateSnapshots(clientID string) (int, int64) {
// First pass: collect snapshots to delete while holding lock
s.mu.Lock()
client, exists := s.clients[clientID]
if !exists {
s.mu.Unlock()
return 0, 0
}
snapshots := s.snapshots[clientID]
if len(snapshots) == 0 {
s.mu.Unlock()
return 0, 0
}
// Sort by timestamp (oldest first)
sort.Slice(snapshots, func(i, j int) bool {
return snapshots[i].Timestamp.Before(snapshots[j].Timestamp)
})
currentUsage := int64(0)
for _, snap := range snapshots {
currentUsage += snap.SizeBytes
}
// Collect snapshots to delete; len(snapshots) > 1 keeps the newest one
// even when it alone exceeds the quota.
var toDelete []*SnapshotMetadata
for currentUsage > client.MaxSizeBytes && len(snapshots) > 1 {
oldest := snapshots[0]
toDelete = append(toDelete, oldest)
currentUsage -= oldest.SizeBytes
snapshots = snapshots[1:]
}
// Update state before I/O
s.snapshots[clientID] = snapshots
s.mu.Unlock()
if len(toDelete) == 0 {
return 0, 0
}
// Select appropriate backend. NOTE(review): client is read after the
// lock was released; safe only if ClientConfig values are never mutated
// concurrently — confirm.
var backend StorageBackend
if client.StorageType == "s3" {
backend = s.s3Backend
} else {
backend = s.localBackend
}
// Second pass: delete without holding lock
deletedCount := 0
reclaimedBytes := int64(0)
ctx := context.Background()
for _, snap := range toDelete {
if err := backend.Delete(ctx, snap.StorageKey); err != nil {
log.Printf("Error deleting snapshot %s: %v", snap.StorageKey, err)
continue
}
log.Printf("Rotated out snapshot: %s (freed %d bytes)", snap.StorageKey, snap.SizeBytes)
reclaimedBytes += snap.SizeBytes
deletedCount++
}
// Save metadata after deletions
s.saveMetadata()
return deletedCount, reclaimedBytes
}
// HTTP Handlers
// HandleUpload handles snapshot upload requests
func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
var req UploadRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
respondJSON(w, http.StatusBadRequest, UploadResponse{
Success: false,
Message: "Invalid request",
})
return
}
if !s.authenticate(req.ClientID, req.APIKey) {
respondJSON(w, http.StatusUnauthorized, UploadResponse{
Success: false,
Message: "Authentication failed",
})
return
}
// Check quota
estimatedSize := req.EstimatedSize
if estimatedSize == 0 {
estimatedSize = 1 * 1024 * 1024 * 1024 // Default 1GB estimate
}
canAccept, msg := s.canAcceptSnapshot(req.ClientID, estimatedSize)
if !canAccept {
respondJSON(w, http.StatusForbidden, UploadResponse{
Success: false,
Message: msg,
})
return
}
s.mu.RLock()
client := s.clients[req.ClientID]
s.mu.RUnlock()
timestamp := time.Now().Format("2006-01-02_15:04:05")
if client.StorageType == "s3" {
// S3 upload
storageKey := fmt.Sprintf("%s/%s_%s.zfs", req.ClientID, req.DatasetName, timestamp)
if req.Compressed {
storageKey += ".gz"
}
respondJSON(w, http.StatusOK, UploadResponse{
Success: true,
Message: "Ready to receive snapshot",
UploadMethod: "s3",
StorageKey: storageKey,
UploadURL: fmt.Sprintf("/upload-stream/%s", req.ClientID),
})
} else {
// Local ZFS receive
snapshotName := fmt.Sprintf("%s@%s_%s", client.Dataset, req.ClientID, timestamp)
respondJSON(w, http.StatusOK, UploadResponse{
Success: true,
Message: "Ready to receive snapshot",
UploadMethod: "zfs-receive",
StorageKey: snapshotName,
})
}
}
// HandleUploadStream handles streaming snapshot uploads
func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Extract client ID from URL
parts := strings.Split(r.URL.Path, "/")
if len(parts) < 3 {
http.Error(w, "Invalid URL", http.StatusBadRequest)
return
}
clientID := parts[2]
// Get metadata from headers
apiKey := r.Header.Get("X-API-Key")
storageKey := r.Header.Get("X-Storage-Key")
datasetName := r.Header.Get("X-Dataset-Name")
compressedStr := r.Header.Get("X-Compressed")
incrementalStr := r.Header.Get("X-Incremental")
baseSnapshot := r.Header.Get("X-Base-Snapshot")
if !s.authenticate(clientID, apiKey) {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
ctx := context.Background()
// Upload to S3
size := r.ContentLength
if size < 0 {
size = 0
}
if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
log.Printf("Error uploading to S3: %v", err)
http.Error(w, "Upload failed", http.StatusInternalServerError)
return
}
// Get actual size after upload
actualSize, err := s.s3Backend.GetSize(ctx, storageKey)
if err != nil {
log.Printf("Error getting object size: %v", err)
actualSize = size
}
// Save metadata
s.mu.Lock()
metadata := &SnapshotMetadata{
ClientID: clientID,
SnapshotID: storageKey,
Timestamp: time.Now(),
SizeBytes: actualSize,
DatasetName: datasetName,
StorageKey: storageKey,
StorageType: "s3",
Compressed: compressedStr == "true",
Incremental: incrementalStr == "true",
BaseSnapshot: baseSnapshot,
}
s.snapshots[clientID] = append(s.snapshots[clientID], metadata)
s.mu.Unlock()
s.saveMetadata()
respondJSON(w, http.StatusOK, map[string]interface{}{
"success": true,
"message": "Snapshot uploaded successfully",
"size": actualSize,
})
}
// HandleStatus handles status requests
func (s *Server) HandleStatus(w http.ResponseWriter, r *http.Request) {
clientID := r.URL.Query().Get("client_id")
apiKey := r.URL.Query().Get("api_key")
if !s.authenticate(clientID, apiKey) {
respondJSON(w, http.StatusUnauthorized, StatusResponse{Success: false})
return
}
s.mu.RLock()
client := s.clients[clientID]
snapshots := s.snapshots[clientID]
s.mu.RUnlock()
usedBytes := s.getClientUsage(clientID)
percentUsed := float64(usedBytes) / float64(client.MaxSizeBytes) * 100
respondJSON(w, http.StatusOK, StatusResponse{
Success: true,
TotalSnapshots: len(snapshots),
UsedBytes: usedBytes,
MaxBytes: client.MaxSizeBytes,
PercentUsed: percentUsed,
Snapshots: snapshots,
StorageType: client.StorageType,
})
}
// HandleRotate handles snapshot rotation requests
func (s *Server) HandleRotate(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
var req struct {
ClientID string `json:"client_id"`
APIKey string `json:"api_key"`
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, "Invalid request", http.StatusBadRequest)
return
}
if !s.authenticate(req.ClientID, req.APIKey) {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
deletedCount, reclaimedBytes := s.rotateSnapshots(req.ClientID)
respondJSON(w, http.StatusOK, map[string]interface{}{
"success": true,
"deleted_count": deletedCount,
"reclaimed_bytes": reclaimedBytes,
})
}
// HandleDownload handles snapshot download requests
func (s *Server) HandleDownload(w http.ResponseWriter, r *http.Request) {
clientID := r.URL.Query().Get("client_id")
apiKey := r.URL.Query().Get("api_key")
snapshotID := r.URL.Query().Get("snapshot_id")
if !s.authenticate(clientID, apiKey) {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
// Find snapshot metadata
s.mu.RLock()
client := s.clients[clientID]
var targetSnapshot *SnapshotMetadata
for _, snap := range s.snapshots[clientID] {
if snap.SnapshotID == snapshotID {
targetSnapshot = snap
break
}
}
s.mu.RUnlock()
if targetSnapshot == nil {
http.Error(w, "Snapshot not found", http.StatusNotFound)
return
}
ctx := context.Background()
var backend StorageBackend
if client.StorageType == "s3" {
backend = s.s3Backend
} else {
backend = s.localBackend
}
// Download from storage
reader, err := backend.Download(ctx, targetSnapshot.StorageKey)
if err != nil {
log.Printf("Error downloading snapshot: %v", err)
http.Error(w, "Download failed", http.StatusInternalServerError)
return
}
defer reader.Close()
// Stream to client
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", targetSnapshot.StorageKey))
if _, err := io.Copy(w, reader); err != nil {
log.Printf("Error streaming snapshot: %v", err)
}
}
// HandleHealth handles health check requests
func (s *Server) HandleHealth(w http.ResponseWriter, r *http.Request) {
respondJSON(w, http.StatusOK, map[string]interface{}{
"status": "healthy",
"time": time.Now(),
})
}
// HandleRotationPolicy handles rotation policy requests from clients.
// Returns the rotation policy configured for the client, if any.
// If a policy is set, the client must use it and cannot override it.
// HandleRotationPolicy returns the rotation policy configured for the
// authenticated client. When a server-managed policy exists the client
// must use it; otherwise the client may fall back to its own defaults.
func (s *Server) HandleRotationPolicy(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()
	clientID := query.Get("client_id")
	apiKey := query.Get("api_key")
	if !s.authenticate(clientID, apiKey) {
		respondJSON(w, http.StatusUnauthorized, RotationPolicyResponse{
			Success: false,
			Message: "Authentication failed",
		})
		return
	}
	s.mu.RLock()
	client, exists := s.clients[clientID]
	s.mu.RUnlock()
	if !exists {
		respondJSON(w, http.StatusNotFound, RotationPolicyResponse{
			Success: false,
			Message: "Client not found",
		})
		return
	}
	resp := RotationPolicyResponse{Success: true}
	if client.RotationPolicy != nil {
		// Server-managed policy: the client cannot override it.
		resp.Message = "Server-managed rotation policy"
		resp.RotationPolicy = client.RotationPolicy
		resp.ServerManaged = true
	} else {
		resp.Message = "No server-managed policy, client can use defaults"
	}
	respondJSON(w, http.StatusOK, resp)
}
// RegisterRoutes registers all HTTP routes
func (s *Server) RegisterRoutes(mux *http.ServeMux) {
mux.HandleFunc("/upload", s.HandleUpload)
mux.HandleFunc("/upload-stream/", s.HandleUploadStream)
mux.HandleFunc("/status", s.HandleStatus)
mux.HandleFunc("/rotate", s.HandleRotate)
mux.HandleFunc("/download", s.HandleDownload)
mux.HandleFunc("/health", s.HandleHealth)
mux.HandleFunc("/rotation-policy", s.HandleRotationPolicy)
}
// respondJSON writes data as a JSON response with the given HTTP status.
// Encoding errors cannot change the response at this point (the header
// is already sent), so they are logged instead of silently dropped.
func respondJSON(w http.ResponseWriter, status int, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if err := json.NewEncoder(w).Encode(data); err != nil {
		log.Printf("Error encoding JSON response: %v", err)
	}
}
// hashAPIKey returns the lowercase hex encoding of the SHA-256 digest
// of key, the form in which API keys are stored and compared.
func hashAPIKey(key string) string {
	digest := sha256.Sum256([]byte(key))
	return fmt.Sprintf("%x", digest)
}

197
internal/server/storage.go Normal file
View File

@@ -0,0 +1,197 @@
package server
import (
"context"
"fmt"
"io"
"log"
"os/exec"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/mistifyio/go-zfs"
)
// StorageBackend defines the interface for different storage types
// (S3-compatible object storage and local ZFS).
type StorageBackend interface {
// Upload stores size bytes read from data under key.
Upload(ctx context.Context, key string, data io.Reader, size int64) error
// Download returns a reader over the data stored at key; the caller
// must Close it.
Download(ctx context.Context, key string) (io.ReadCloser, error)
// Delete removes the data stored at key.
Delete(ctx context.Context, key string) error
// List returns the keys of all stored objects matching prefix.
List(ctx context.Context, prefix string) ([]string, error)
// GetSize returns the stored size in bytes of the object at key.
GetSize(ctx context.Context, key string) (int64, error)
}
// S3Backend implements StorageBackend for S3-compatible storage.
type S3Backend struct {
// client is the minio S3 client.
client *minio.Client
// bucketName is the bucket all objects are stored in.
bucketName string
}
// NewS3Backend connects to an S3-compatible endpoint and ensures the
// target bucket exists, creating it when missing.
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
	opts := &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: useSSL,
	}
	mc, err := minio.New(endpoint, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to create S3 client: %v", err)
	}
	// Ensure the bucket exists before handing out the backend.
	ctx := context.Background()
	found, err := mc.BucketExists(ctx, bucketName)
	if err != nil {
		return nil, fmt.Errorf("failed to check bucket: %v", err)
	}
	if !found {
		if err = mc.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{}); err != nil {
			return nil, fmt.Errorf("failed to create bucket: %v", err)
		}
		log.Printf("Created S3 bucket: %s", bucketName)
	}
	return &S3Backend{client: mc, bucketName: bucketName}, nil
}
// Upload streams data into S3 under key as an octet-stream object,
// using 10MB multipart parts.
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
	opts := minio.PutObjectOptions{
		ContentType: "application/octet-stream",
		PartSize:    10 * 1024 * 1024, // 10MB parts
	}
	_, err := s.client.PutObject(ctx, s.bucketName, key, data, size, opts)
	return err
}
// Download returns a reader over the object stored at key.
// Note: minio defers most errors until the first Read.
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
	object, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	return object, nil
}
// Delete removes the object stored at key from the bucket.
func (s *S3Backend) Delete(ctx context.Context, key string) error {
	opts := minio.RemoveObjectOptions{}
	return s.client.RemoveObject(ctx, s.bucketName, key, opts)
}
// List returns the keys of all objects under prefix, recursively.
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
	opts := minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: true,
	}
	var keys []string
	for obj := range s.client.ListObjects(ctx, s.bucketName, opts) {
		if obj.Err != nil {
			return nil, obj.Err
		}
		keys = append(keys, obj.Key)
	}
	return keys, nil
}
// GetSize returns the size in bytes of the object stored at key.
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
	stat, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{})
	if err != nil {
		return 0, err
	}
	return stat.Size, nil
}
// LocalBackend implements StorageBackend for local ZFS storage.
type LocalBackend struct {
// baseDataset is the root ZFS dataset.
// NOTE(review): currently unused by the methods below — confirm it is
// needed or intended for future use.
baseDataset string
}
// NewLocalBackend creates a local ZFS storage backend rooted at baseDataset.
func NewLocalBackend(baseDataset string) *LocalBackend {
	backend := &LocalBackend{baseDataset: baseDataset}
	return backend
}
// Upload is not supported for the local backend; snapshots arrive via
// the dedicated zfs receive endpoint instead.
func (l *LocalBackend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
	err := fmt.Errorf("local backend upload not supported via storage interface, use zfs receive endpoint")
	return err
}
// Download starts `zfs send <key>` and returns a ReadCloser over its
// stdout. Closing the returned reader terminates the stream and reaps
// the child process.
func (l *LocalBackend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
	cmd := exec.CommandContext(ctx, "zfs", "send", key)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		// Close the pipe so its file descriptors are not leaked when the
		// command never starts (the original returned without closing it).
		stdout.Close()
		return nil, err
	}
	return &cmdReadCloser{stdout: stdout, cmd: cmd}, nil
}
// Delete destroys the ZFS dataset or snapshot named by key.
// ctx is accepted for interface compatibility; go-zfs does not take one.
func (l *LocalBackend) Delete(ctx context.Context, key string) error {
	dataset, err := zfs.GetDataset(key)
	if err != nil {
		return err
	}
	return dataset.Destroy(zfs.DestroyDefault)
}
// List returns the names of all ZFS snapshots under prefix.
func (l *LocalBackend) List(ctx context.Context, prefix string) ([]string, error) {
	snaps, err := zfs.Snapshots(prefix)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, snapshot := range snaps {
		names = append(names, snapshot.Name)
	}
	return names, nil
}
// GetSize returns the used size in bytes of the ZFS dataset named by key.
func (l *LocalBackend) GetSize(ctx context.Context, key string) (int64, error) {
	dataset, err := zfs.GetDataset(key)
	if err != nil {
		return 0, err
	}
	return int64(dataset.Used), nil
}
// cmdReadCloser exposes a command's stdout as an io.ReadCloser and makes
// Close reap the underlying process exactly once.
type cmdReadCloser struct {
	stdout io.ReadCloser // pipe from the child's stdout
	cmd    *exec.Cmd     // the running child process
	closed bool          // guards against double Close
}

// Read forwards to the stdout pipe.
func (c *cmdReadCloser) Read(p []byte) (int, error) {
	return c.stdout.Read(p)
}

// Close closes the pipe and waits for the process to exit. Subsequent
// calls are no-ops. A pipe-close error takes precedence over the
// process's exit error.
func (c *cmdReadCloser) Close() error {
	if c.closed {
		return nil
	}
	c.closed = true
	closeErr := c.stdout.Close()
	waitErr := c.cmd.Wait()
	if closeErr != nil {
		return closeErr
	}
	return waitErr
}

87
internal/server/types.go Normal file
View File

@@ -0,0 +1,87 @@
package server
import (
"time"
)
// ClientConfig holds client authentication and quota information.
// Persisted as JSON in the server's config file.
type ClientConfig struct {
// ClientID uniquely identifies the client.
ClientID string `json:"client_id"`
// APIKey is the hex-encoded SHA-256 hash of the client's API key,
// not the key itself (see hashAPIKey).
APIKey string `json:"api_key"`
// MaxSizeBytes is the client's storage quota in bytes.
MaxSizeBytes int64 `json:"max_size_bytes"`
// Dataset is the ZFS dataset used for this client's local snapshots.
Dataset string `json:"dataset"`
// Enabled disables authentication for the client when false.
Enabled bool `json:"enabled"`
StorageType string `json:"storage_type"` // "s3" or "local"
// RotationPolicy defines the snapshot retention policy for this client
// If set, the client must use this policy and cannot override it
RotationPolicy *RotationPolicy `json:"rotation_policy,omitempty"`
}
// RotationPolicy defines retention settings for automatic snapshots.
// When set on the server, clients must use this policy for local rotation.
// A zero value in any field means "keep none" of that interval.
type RotationPolicy struct {
// KeepHourly is the number of hourly snapshots to keep
KeepHourly int `json:"keep_hourly"`
// KeepDaily is the number of daily snapshots to keep
KeepDaily int `json:"keep_daily"`
// KeepWeekly is the number of weekly snapshots to keep
KeepWeekly int `json:"keep_weekly"`
// KeepMonthly is the number of monthly snapshots to keep
KeepMonthly int `json:"keep_monthly"`
}
// SnapshotMetadata represents snapshot information recorded by the
// server after a successful upload; persisted in the metadata file.
type SnapshotMetadata struct {
// ClientID is the owning client.
ClientID string `json:"client_id"`
// SnapshotID identifies the snapshot (currently equal to StorageKey
// for S3 uploads).
SnapshotID string `json:"snapshot_id"`
// Timestamp is when the server recorded the snapshot.
Timestamp time.Time `json:"timestamp"`
// SizeBytes is the stored size, used for quota accounting.
SizeBytes int64 `json:"size_bytes"`
// DatasetName is the source dataset reported by the client.
DatasetName string `json:"dataset_name"`
// StorageKey locates the data in the backend (S3 key or ZFS name).
StorageKey string `json:"storage_key"`
// StorageType is "s3" or "local".
StorageType string `json:"storage_type"`
// Compressed marks gzip-compressed streams.
Compressed bool `json:"compressed"`
// Incremental marks incremental (zfs send -i) streams.
Incremental bool `json:"incremental"`
// BaseSnapshot is the base of an incremental stream, when any.
BaseSnapshot string `json:"base_snapshot,omitempty"`
}
// UploadRequest represents a snapshot upload negotiation request sent
// to the /upload endpoint.
type UploadRequest struct {
// ClientID and APIKey authenticate the request (APIKey is the raw key;
// the server hashes it for comparison).
ClientID string `json:"client_id"`
APIKey string `json:"api_key"`
// DatasetName is the source dataset being backed up.
DatasetName string `json:"dataset_name"`
// Timestamp is client-supplied.
// NOTE(review): not read by the visible server handlers — confirm use.
Timestamp string `json:"timestamp"`
// Compressed indicates the stream will be gzip-compressed.
Compressed bool `json:"compressed"`
// EstimatedSize in bytes; 0 makes the server assume 1GB for quota checks.
EstimatedSize int64 `json:"estimated_size"`
// Incremental marks an incremental send based on BaseSnapshot.
Incremental bool `json:"incremental"`
BaseSnapshot string `json:"base_snapshot,omitempty"`
}
// UploadResponse represents the response to an upload request, telling
// the client whether and how to deliver the snapshot data.
type UploadResponse struct {
// Success is false on auth, quota, or validation failure.
Success bool `json:"success"`
// Message is a human-readable status or error description.
Message string `json:"message"`
// UploadURL is the streaming endpoint to POST the data to (S3 method only).
UploadURL string `json:"upload_url,omitempty"`
UploadMethod string `json:"upload_method,omitempty"` // "s3" or "zfs-receive"
// StorageKey is the server-assigned key (S3) or snapshot name (local).
StorageKey string `json:"storage_key,omitempty"`
}
// StatusResponse represents the response to a status request, reporting
// quota usage and the client's snapshot inventory.
type StatusResponse struct {
Success bool `json:"success"`
// TotalSnapshots is the number of recorded snapshots.
TotalSnapshots int `json:"total_snapshots"`
// UsedBytes / MaxBytes describe quota consumption.
UsedBytes int64 `json:"used_bytes"`
MaxBytes int64 `json:"max_bytes"`
// PercentUsed is UsedBytes as a percentage of MaxBytes.
PercentUsed float64 `json:"percent_used"`
// Snapshots lists the client's snapshot metadata.
Snapshots []*SnapshotMetadata `json:"snapshots"`
// StorageType is the client's configured backend ("s3" or "local").
StorageType string `json:"storage_type"`
}
// RotationPolicyResponse represents the response to a rotation policy
// request made by a client against /rotation-policy.
type RotationPolicyResponse struct {
Success bool `json:"success"`
// Message is a human-readable status or error description.
Message string `json:"message,omitempty"`
// RotationPolicy is the policy to apply; nil when none is configured.
RotationPolicy *RotationPolicy `json:"rotation_policy,omitempty"`
// ServerManaged indicates if the policy is managed by the server
// If true, client must use this policy and cannot override it
ServerManaged bool `json:"server_managed"`
}