Add support for multiple datasets per client

This commit is contained in:
2026-02-16 02:37:31 +01:00
parent 1903535dc5
commit 344b9d658b
6 changed files with 217 additions and 25 deletions

View File

@@ -23,9 +23,16 @@ func main() {
switch command { switch command {
case "snap", "snapshot": case "snap", "snapshot":
// Create snapshot and send to server (auto full/incremental) // Create snapshot and send to server (auto full/incremental)
// Optional: specify dataset as argument
targetDataset := ""
if len(os.Args) > 2 {
targetDataset = os.Args[2]
fmt.Printf("→ Using dataset: %s\n", targetDataset)
}
fmt.Println("=== Creating and sending snapshot ===\n") fmt.Println("=== Creating and sending snapshot ===\n")
snapshot, err := c.CreateAndSend() snapshot, err := c.CreateAndSend(targetDataset)
if err != nil { if err != nil {
fmt.Printf("Error: %v\n", err) fmt.Printf("Error: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -56,9 +63,10 @@ func main() {
func printUsage() { func printUsage() {
fmt.Println("ZFS Snapshot Backup Client - Simple Version") fmt.Println("ZFS Snapshot Backup Client - Simple Version")
fmt.Println("\nUsage: zfs-client [command]") fmt.Println("\nUsage: zfs-client [command] [dataset]")
fmt.Println("\nCommands:") fmt.Println("\nCommands:")
fmt.Println(" snap - Create snapshot and send to server") fmt.Println(" snap [dataset] - Create snapshot and send to server")
fmt.Println(" If dataset not specified, uses LOCAL_DATASET from config")
fmt.Println(" status - Check server connection and quota") fmt.Println(" status - Check server connection and quota")
fmt.Println(" help - Show this help message") fmt.Println(" help - Show this help message")
fmt.Println("\nEnvironment Variables (can be set in .env file):") fmt.Println("\nEnvironment Variables (can be set in .env file):")
@@ -68,6 +76,7 @@ func printUsage() {
fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)") fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)") fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)")
fmt.Println("\nExamples:") fmt.Println("\nExamples:")
fmt.Println(" zfs-client snap") fmt.Println(" zfs-client snap # Use configured dataset")
fmt.Println(" zfs-client snap tank/data # Backup specific dataset")
fmt.Println(" zfs-client status") fmt.Println(" zfs-client status")
} }

View File

@@ -38,7 +38,13 @@ func New(config *Config) *Client {
// It automatically detects if this is a full or incremental backup: // It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup // - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark // - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) { // If targetDataset is provided, it overrides the configured dataset.
func (c *Client) CreateAndSend(targetDataset string) (*SnapshotResult, error) {
// Use provided dataset or fall back to config
if targetDataset == "" {
targetDataset = c.config.LocalDataset
}
// Check for existing bookmark to determine backup type // Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark() lastBookmark, err := c.GetLastBookmark()
if err != nil { if err != nil {
@@ -46,7 +52,7 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
} }
// Create new snapshot // Create new snapshot
snapshot, err := c.CreateSnapshot() snapshot, err := c.CreateSnapshot(targetDataset)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err) return nil, fmt.Errorf("failed to create snapshot: %v", err)
} }
@@ -55,13 +61,13 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
if isFullBackup { if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...") fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base) // Send as full (no base)
if err := c.SendIncrementalHTTP(snapshot, ""); err != nil { if err := c.SendIncrementalHTTP(snapshot, targetDataset, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err) return nil, fmt.Errorf("failed to send snapshot: %v", err)
} }
} else { } else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark) fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
// Send as incremental from bookmark // Send as incremental from bookmark
if err := c.SendIncrementalHTTP(snapshot, lastBookmark); err != nil { if err := c.SendIncrementalHTTP(snapshot, targetDataset, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err) return nil, fmt.Errorf("failed to send incremental: %v", err)
} }
} }
@@ -78,8 +84,8 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
} }
// CreateSnapshot creates a local ZFS snapshot of the configured dataset. // CreateSnapshot creates a local ZFS snapshot of the configured dataset.
func (c *Client) CreateSnapshot() (*zfs.Dataset, error) { func (c *Client) CreateSnapshot(dataset string) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(c.config.LocalDataset) ds, err := zfs.GetDataset(dataset)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err) return nil, fmt.Errorf("failed to get dataset: %v", err)
} }
@@ -105,7 +111,8 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
// SendIncrementalHTTP sends a snapshot to the server via HTTP. // SendIncrementalHTTP sends a snapshot to the server via HTTP.
// The server then handles storage (S3 or local ZFS). // The server then handles storage (S3 or local ZFS).
func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error { // datasetName should be the ZFS dataset being backed up (e.g., "tank/data")
func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base string) error {
estimatedSize := c.GetSnapshotSize(snapshot) estimatedSize := c.GetSnapshotSize(snapshot)
// Determine if this is incremental or full // Determine if this is incremental or full
@@ -115,7 +122,7 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
uploadReq := map[string]interface{}{ uploadReq := map[string]interface{}{
"client_id": c.config.ClientID, "client_id": c.config.ClientID,
"api_key": c.config.APIKey, "api_key": c.config.APIKey,
"dataset_name": c.config.LocalDataset, "dataset_name": datasetName,
"timestamp": time.Now().Format(time.RFC3339), "timestamp": time.Now().Format(time.RFC3339),
"compressed": c.config.Compress, "compressed": c.config.Compress,
"estimated_size": estimatedSize, "estimated_size": estimatedSize,

View File

@@ -72,6 +72,6 @@ func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
} }
// SendIncremental is kept for API compatibility - now just calls HTTP version // SendIncremental is kept for API compatibility - now just calls HTTP version
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error { func (c *Client) SendIncremental(snapshot *zfs.Dataset, datasetName, base string) error {
return c.SendIncrementalHTTP(snapshot, base) return c.SendIncrementalHTTP(snapshot, datasetName, base)
} }

View File

@@ -134,9 +134,11 @@ const adminPanelHTML = `<!DOCTYPE html>
<thead> <thead>
<tr> <tr>
<th>Client</th> <th>Client</th>
<th>Dataset</th>
<th>Snapshot ID</th> <th>Snapshot ID</th>
<th>Timestamp</th> <th>Timestamp</th>
<th>Size</th> <th>Size</th>
<th>Storage</th>
<th>Type</th> <th>Type</th>
<th>Actions</th> <th>Actions</th>
</tr> </tr>
@@ -488,12 +490,14 @@ const adminPanelHTML = `<!DOCTYPE html>
const sizeGB = (s.size_bytes / (1024*1024*1024)).toFixed(2); const sizeGB = (s.size_bytes / (1024*1024*1024)).toFixed(2);
return '<tr>' + return '<tr>' +
'<td>' + s.client_id + '</td>' + '<td>' + s.client_id + '</td>' +
'<td>' + (s.dataset_name || '-') + '</td>' +
'<td>' + s.snapshot_id + '</td>' + '<td>' + s.snapshot_id + '</td>' +
'<td>' + new Date(s.timestamp).toLocaleString() + '</td>' + '<td>' + new Date(s.timestamp).toLocaleString() + '</td>' +
'<td>' + sizeGB + ' GB</td>' + '<td>' + sizeGB + ' GB</td>' +
'<td><span class="badge ' + (s.storage_type === 's3' ? 'badge-info' : 'badge-warning') + '">' + s.storage_type + '</span></td>' +
'<td>' + '<td>' +
(s.incremental ? '<span class="badge badge-info">Incremental</span>' : '<span class="badge badge-success">Full</span>') + (s.incremental ? '<span class="badge badge-info">Inc</span>' : '<span class="badge badge-success">Full</span>') +
(s.compressed ? ' <span class="badge badge-info">Compressed</span>' : '') + (s.compressed ? ' <span class="badge badge-info">LZ4</span>' : '') +
'</td>' + '</td>' +
'<td><button class="btn btn-sm btn-danger" onclick="deleteSnapshot(\'' + s.client_id + '\', \'' + s.snapshot_id + '\')">Delete</button></td>' + '<td><button class="btn btn-sm btn-danger" onclick="deleteSnapshot(\'' + s.client_id + '\', \'' + s.snapshot_id + '\')">Delete</button></td>' +
'</tr>'; '</tr>';

View File

@@ -89,6 +89,23 @@ func (d *Database) initTables() error {
return fmt.Errorf("failed to create clients table: %v", err) return fmt.Errorf("failed to create clients table: %v", err)
} }
// Datasets table - multiple datasets per client
_, err = d.db.Exec(`
CREATE TABLE IF NOT EXISTS datasets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
client_id TEXT NOT NULL,
dataset_name TEXT NOT NULL,
storage_type TEXT NOT NULL DEFAULT 's3',
enabled INTEGER NOT NULL DEFAULT 1,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (client_id) REFERENCES clients(client_id) ON DELETE CASCADE,
UNIQUE(client_id, dataset_name)
)
`)
if err != nil {
return fmt.Errorf("failed to create datasets table: %v", err)
}
// Snapshots table // Snapshots table
_, err = d.db.Exec(` _, err = d.db.Exec(`
CREATE TABLE IF NOT EXISTS snapshots ( CREATE TABLE IF NOT EXISTS snapshots (
@@ -400,6 +417,138 @@ func (d *Database) CreateDefaultClient() error {
return d.SaveClient(defaultClient) return d.SaveClient(defaultClient)
} }
// CreateDefaultDataset ensures the client has at least one dataset
// configuration. If the client already owns any datasets this is a no-op;
// otherwise datasetName is registered with the default storage backend
// ("s3") in the enabled state.
func (d *Database) CreateDefaultDataset(clientID, datasetName string) error {
	existing, err := d.GetDatasetsByClient(clientID)
	if err != nil {
		return err
	}
	if len(existing) > 0 {
		// Client is already configured; nothing to seed.
		return nil
	}
	return d.SaveDataset(&DatasetConfig{
		ClientID:    clientID,
		DatasetName: datasetName,
		StorageType: "s3",
		Enabled:     true,
	})
}
// DatasetConfig represents one ZFS dataset a client is allowed to back up.
// A client may own multiple datasets; the schema enforces that the
// (client_id, dataset_name) pair is unique.
type DatasetConfig struct {
	ID          int64  `json:"id"`           // primary key (AUTOINCREMENT); 0 means "not yet saved"
	ClientID    string `json:"client_id"`    // owning client's ID
	DatasetName string `json:"dataset_name"` // ZFS dataset path, e.g. "tank/data"
	StorageType string `json:"storage_type"` // storage backend; defaults to "s3" — presumably "s3" or local ZFS, confirm against server
	Enabled     bool   `json:"enabled"`      // disabled datasets are rejected by the upload handler
}
// GetDatasetsByClient returns every dataset configuration registered for the
// given client ID. The returned slice is nil (not empty) when the client has
// no datasets; callers should test len(datasets), not datasets == nil.
func (d *Database) GetDatasetsByClient(clientID string) ([]*DatasetConfig, error) {
	query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ?`
	rows, err := d.db.Query(query, clientID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var datasets []*DatasetConfig
	for rows.Next() {
		dataset := &DatasetConfig{}
		var enabled int
		if err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled); err != nil {
			return nil, err
		}
		// The enabled column is stored as an integer; normalize to bool.
		dataset.Enabled = enabled == 1
		datasets = append(datasets, dataset)
	}
	// rows.Next returns false on both end-of-rows and iteration failure;
	// rows.Err distinguishes the two. Without this check, a mid-iteration
	// error (e.g. dropped connection) would be silently reported as an
	// empty/truncated result.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datasets, nil
}
// GetDatasetByName looks up a single dataset configuration by its owning
// client ID and dataset name. It returns (nil, nil) when no matching row
// exists, so callers must check for a nil result before dereferencing.
func (d *Database) GetDatasetByName(clientID, datasetName string) (*DatasetConfig, error) {
	const query = `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ? AND dataset_name = ?`
	var (
		dataset DatasetConfig
		enabled int
	)
	err := d.db.QueryRow(query, clientID, datasetName).Scan(
		&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled,
	)
	switch {
	case err == sql.ErrNoRows:
		// A missing row is "not found", not an error.
		return nil, nil
	case err != nil:
		return nil, err
	}
	dataset.Enabled = enabled == 1
	return &dataset, nil
}
// SaveDataset persists a dataset configuration. A zero ID means the record
// is new and gets inserted; a non-zero ID updates the mutable fields
// (storage_type, enabled) of the existing row.
func (d *Database) SaveDataset(dataset *DatasetConfig) error {
	var enabled int
	if dataset.Enabled {
		enabled = 1
	}
	if dataset.ID == 0 {
		// New record; the schema's UNIQUE(client_id, dataset_name)
		// constraint rejects duplicates.
		_, err := d.db.Exec(
			`INSERT INTO datasets (client_id, dataset_name, storage_type, enabled) VALUES (?, ?, ?, ?)`,
			dataset.ClientID, dataset.DatasetName, dataset.StorageType, enabled,
		)
		return err
	}
	// Existing record: only storage_type and enabled may change.
	_, err := d.db.Exec(
		`UPDATE datasets SET storage_type = ?, enabled = ? WHERE id = ?`,
		dataset.StorageType, enabled, dataset.ID,
	)
	return err
}
// DeleteDataset removes the dataset row with the given primary-key ID.
// Deleting an ID that does not exist is not an error.
func (d *Database) DeleteDataset(id int64) error {
	if _, err := d.db.Exec(`DELETE FROM datasets WHERE id = ?`, id); err != nil {
		return err
	}
	return nil
}
// GetAllDatasets returns every dataset configuration across all clients
// (used by the admin panel). The returned slice is nil when the table is
// empty.
func (d *Database) GetAllDatasets() ([]*DatasetConfig, error) {
	query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets`
	rows, err := d.db.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var datasets []*DatasetConfig
	for rows.Next() {
		dataset := &DatasetConfig{}
		var enabled int
		if err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled); err != nil {
			return nil, err
		}
		// The enabled column is stored as an integer; normalize to bool.
		dataset.Enabled = enabled == 1
		datasets = append(datasets, dataset)
	}
	// rows.Next returns false on both end-of-rows and iteration failure;
	// without checking rows.Err a mid-iteration error would be silently
	// reported as a truncated result.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datasets, nil
}
// GetSnapshotByID retrieves a specific snapshot // GetSnapshotByID retrieves a specific snapshot
func (d *Database) GetSnapshotByID(clientID, snapshotID string) (*SnapshotMetadata, error) { func (d *Database) GetSnapshotByID(clientID, snapshotID string) (*SnapshotMetadata, error) {
snap := &SnapshotMetadata{} snap := &SnapshotMetadata{}

View File

@@ -271,6 +271,36 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
return return
} }
// Check if dataset is allowed for this client
dataset, err := s.db.GetDatasetByName(clientID, datasetName)
if err != nil || dataset == nil {
// Auto-create dataset if not exists
log.Printf("Dataset %s not found for client %s, creating...", datasetName, clientID)
newDataset := &DatasetConfig{
ClientID: clientID,
DatasetName: datasetName,
StorageType: "s3",
Enabled: true,
}
if err := s.db.SaveDataset(newDataset); err != nil {
log.Printf("Error creating dataset: %v", err)
respondJSON(w, http.StatusForbidden, UploadResponse{
Success: false,
Message: "Dataset not configured for this client",
})
return
}
dataset = newDataset
}
if !dataset.Enabled {
respondJSON(w, http.StatusForbidden, UploadResponse{
Success: false,
Message: "Dataset is disabled",
})
return
}
ctx := context.Background() ctx := context.Background()
// Upload to S3 // Upload to S3
@@ -438,13 +468,6 @@ func (s *Server) HandleDownload(w http.ResponseWriter, r *http.Request) {
return return
} }
// Find snapshot metadata
client, err := s.db.GetClient(clientID)
if err != nil || client == nil {
http.Error(w, "Client not found", http.StatusNotFound)
return
}
targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID) targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID)
if err != nil || targetSnapshot == nil { if err != nil || targetSnapshot == nil {
http.Error(w, "Snapshot not found", http.StatusNotFound) http.Error(w, "Snapshot not found", http.StatusNotFound)