multi dataset

@@ -23,9 +23,16 @@ func main() {
     switch command {
     case "snap", "snapshot":
         // Create snapshot and send to server (auto full/incremental)
+        // Optional: specify dataset as argument
+        targetDataset := ""
+        if len(os.Args) > 2 {
+            targetDataset = os.Args[2]
+            fmt.Printf("→ Using dataset: %s\n", targetDataset)
+        }
+
         fmt.Println("=== Creating and sending snapshot ===\n")

-        snapshot, err := c.CreateAndSend()
+        snapshot, err := c.CreateAndSend(targetDataset)
         if err != nil {
             fmt.Printf("Error: %v\n", err)
             os.Exit(1)
@@ -56,11 +63,12 @@ func main() {

 func printUsage() {
     fmt.Println("ZFS Snapshot Backup Client - Simple Version")
-    fmt.Println("\nUsage: zfs-client [command]")
+    fmt.Println("\nUsage: zfs-client [command] [dataset]")
     fmt.Println("\nCommands:")
-    fmt.Println(" snap - Create snapshot and send to server")
-    fmt.Println(" status - Check server connection and quota")
-    fmt.Println(" help - Show this help message")
+    fmt.Println(" snap [dataset] - Create snapshot and send to server")
+    fmt.Println(" If dataset not specified, uses LOCAL_DATASET from config")
+    fmt.Println(" status - Check server connection and quota")
+    fmt.Println(" help - Show this help message")
     fmt.Println("\nEnvironment Variables (can be set in .env file):")
     fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
     fmt.Println(" API_KEY - API key for authentication (default: secret123)")
@@ -68,6 +76,7 @@ func printUsage() {
     fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
     fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)")
     fmt.Println("\nExamples:")
-    fmt.Println(" zfs-client snap")
+    fmt.Println(" zfs-client snap # Use configured dataset")
+    fmt.Println(" zfs-client snap tank/data # Backup specific dataset")
     fmt.Println(" zfs-client status")
 }

@@ -38,7 +38,13 @@ func New(config *Config) *Client {
 // It automatically detects if this is a full or incremental backup:
 // - If no bookmark exists, does a full backup
 // - If bookmark exists, does an incremental backup from the bookmark
-func (c *Client) CreateAndSend() (*SnapshotResult, error) {
+// If targetDataset is provided, it overrides the configured dataset.
+func (c *Client) CreateAndSend(targetDataset string) (*SnapshotResult, error) {
+    // Use provided dataset or fall back to config
+    if targetDataset == "" {
+        targetDataset = c.config.LocalDataset
+    }
+
     // Check for existing bookmark to determine backup type
     lastBookmark, err := c.GetLastBookmark()
     if err != nil {
@@ -46,7 +52,7 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
     }

     // Create new snapshot
-    snapshot, err := c.CreateSnapshot()
+    snapshot, err := c.CreateSnapshot(targetDataset)
     if err != nil {
         return nil, fmt.Errorf("failed to create snapshot: %v", err)
     }
@@ -55,13 +61,13 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
     if isFullBackup {
         fmt.Println("→ No previous backup found, doing FULL backup...")
         // Send as full (no base)
-        if err := c.SendIncrementalHTTP(snapshot, ""); err != nil {
+        if err := c.SendIncrementalHTTP(snapshot, targetDataset, ""); err != nil {
             return nil, fmt.Errorf("failed to send snapshot: %v", err)
         }
     } else {
         fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
         // Send as incremental from bookmark
-        if err := c.SendIncrementalHTTP(snapshot, lastBookmark); err != nil {
+        if err := c.SendIncrementalHTTP(snapshot, targetDataset, lastBookmark); err != nil {
             return nil, fmt.Errorf("failed to send incremental: %v", err)
         }
     }
@@ -78,8 +84,8 @@ func (c *Client) CreateAndSend() (*SnapshotResult, error) {
 }

 // CreateSnapshot creates a local ZFS snapshot of the configured dataset.
-func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
-    ds, err := zfs.GetDataset(c.config.LocalDataset)
+func (c *Client) CreateSnapshot(dataset string) (*zfs.Dataset, error) {
+    ds, err := zfs.GetDataset(dataset)
     if err != nil {
         return nil, fmt.Errorf("failed to get dataset: %v", err)
     }
@@ -105,7 +111,8 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {

 // SendIncrementalHTTP sends a snapshot to the server via HTTP.
 // The server then handles storage (S3 or local ZFS).
-func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
+// datasetName should be the ZFS dataset being backed up (e.g., "tank/data")
+func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base string) error {
     estimatedSize := c.GetSnapshotSize(snapshot)

     // Determine if this is incremental or full
@@ -115,7 +122,7 @@ func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
     uploadReq := map[string]interface{}{
         "client_id": c.config.ClientID,
         "api_key": c.config.APIKey,
-        "dataset_name": c.config.LocalDataset,
+        "dataset_name": datasetName,
         "timestamp": time.Now().Format(time.RFC3339),
         "compressed": c.config.Compress,
         "estimated_size": estimatedSize,
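
For orientation, the metadata fields visible in this hunk marshal to a JSON body roughly like the sketch below. The concrete values are just the defaults quoted in printUsage, and the real request may carry extra fields (for example the incremental base) that fall outside this hunk.

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    func main() {
        // Illustrative values only; in the client these come from Config and
        // from the per-call datasetName argument introduced by this commit.
        uploadReq := map[string]interface{}{
            "client_id":      "client1",
            "api_key":        "secret123",
            "dataset_name":   "tank/data",
            "timestamp":      time.Now().Format(time.RFC3339),
            "compressed":     true,
            "estimated_size": int64(1 << 30),
        }
        b, _ := json.MarshalIndent(uploadReq, "", "  ")
        fmt.Println(string(b))
    }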

@@ -72,6 +72,6 @@ func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
 }

 // SendIncremental is kept for API compatibility - now just calls HTTP version
-func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
-    return c.SendIncrementalHTTP(snapshot, base)
+func (c *Client) SendIncremental(snapshot *zfs.Dataset, datasetName, base string) error {
+    return c.SendIncrementalHTTP(snapshot, datasetName, base)
 }
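
Taken together, the client-side changes let the dataset be chosen per call. Below is a minimal sketch of a wrapper that backs up several datasets in one run; the interface and result type are stand-ins declared only so the example compiles on its own, not the repo's actual types.

    package backup

    import "fmt"

    // snapshotResult stands in for the client's SnapshotResult; illustrative only.
    type snapshotResult struct{}

    // snapshotter mirrors the CreateAndSend signature introduced in this commit.
    type snapshotter interface {
        CreateAndSend(targetDataset string) (*snapshotResult, error)
    }

    // BackupAll sends one snapshot per dataset. An empty name falls back to the
    // configured LOCAL_DATASET, matching the fallback added to CreateAndSend.
    func BackupAll(c snapshotter, datasets []string) error {
        for _, ds := range datasets {
            if _, err := c.CreateAndSend(ds); err != nil {
                return fmt.Errorf("backup of %q failed: %w", ds, err)
            }
        }
        return nil
    }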

@@ -8,6 +8,8 @@ import (
     "fmt"
     "io/fs"
     "net/http"
+    "strconv"
+    "strings"
     "time"

     "git.ma-al.com/goc_marek/zfs/internal/server/templates/pages"
@@ -518,6 +520,118 @@ func (s *Server) handleAdminGetStats(w http.ResponseWriter, r *http.Request) {
     })
 }

+// handleAdminGetDatasets returns all datasets, optionally filtered by client
+func (s *Server) handleAdminGetDatasets(w http.ResponseWriter, r *http.Request) {
+    admin, err := s.authenticateAdmin(r)
+    if err != nil || admin == nil {
+        http.Error(w, "Unauthorized", http.StatusUnauthorized)
+        return
+    }
+
+    clientID := r.URL.Query().Get("client_id")
+
+    var datasets []*DatasetConfig
+    if clientID != "" {
+        datasets, _ = s.db.GetDatasetsByClient(clientID)
+    } else {
+        datasets, _ = s.db.GetAllDatasets()
+    }
+
+    // Get snapshot counts for each dataset
+    type DatasetResponse struct {
+        ID            int64  `json:"id"`
+        ClientID      string `json:"client_id"`
+        DatasetName   string `json:"dataset_name"`
+        StorageType   string `json:"storage_type"`
+        Enabled       bool   `json:"enabled"`
+        SnapshotCount int    `json:"snapshot_count"`
+    }
+
+    response := make([]DatasetResponse, len(datasets))
+    for i, d := range datasets {
+        snapshotCount, _ := s.db.GetSnapshotCountByDataset(d.ClientID, d.DatasetName)
+        response[i] = DatasetResponse{
+            ID:            d.ID,
+            ClientID:      d.ClientID,
+            DatasetName:   d.DatasetName,
+            StorageType:   d.StorageType,
+            Enabled:       d.Enabled,
+            SnapshotCount: snapshotCount,
+        }
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(response)
+}
+
+// handleAdminUpdateDeleteDataset handles PUT and DELETE for a specific dataset
+func (s *Server) handleAdminUpdateDeleteDataset(w http.ResponseWriter, r *http.Request) {
+    admin, err := s.authenticateAdmin(r)
+    if err != nil || admin == nil {
+        http.Error(w, "Unauthorized", http.StatusUnauthorized)
+        return
+    }
+
+    // Extract dataset ID from URL
+    parts := strings.Split(r.URL.Path, "/")
+    if len(parts) < 4 {
+        http.Error(w, "Invalid URL", http.StatusBadRequest)
+        return
+    }
+    datasetID, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
+    if err != nil {
+        http.Error(w, "Invalid dataset ID", http.StatusBadRequest)
+        return
+    }
+
+    // Get dataset from database
+    dataset, err := s.db.GetDatasetByID(datasetID)
+    if err != nil || dataset == nil {
+        http.Error(w, "Dataset not found", http.StatusNotFound)
+        return
+    }
+
+    if r.Method == http.MethodDelete {
+        // Delete dataset
+        if err := s.db.DeleteDataset(datasetID); err != nil {
+            http.Error(w, "Failed to delete dataset", http.StatusInternalServerError)
+            return
+        }
+        w.Header().Set("Content-Type", "application/json")
+        json.NewEncoder(w).Encode(map[string]interface{}{
+            "success": true,
+            "message": "Dataset deleted successfully",
+        })
+        return
+    }
+
+    if r.Method == http.MethodPut {
+        // Update dataset
+        var req struct {
+            Enabled bool `json:"enabled"`
+        }
+        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+            http.Error(w, "Invalid request body", http.StatusBadRequest)
+            return
+        }
+
+        dataset.Enabled = req.Enabled
+        if err := s.db.SaveDataset(dataset); err != nil {
+            http.Error(w, "Failed to update dataset", http.StatusInternalServerError)
+            return
+        }
+
+        w.Header().Set("Content-Type", "application/json")
+        json.NewEncoder(w).Encode(map[string]interface{}{
+            "success": true,
+            "message": "Dataset updated successfully",
+        })
+        return
+    }
+
+    http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+}
+
 // Admin management handlers

 // handleAdminGetAdmins returns all admins

@@ -92,6 +92,7 @@ const adminPanelHTML = `<!DOCTYPE html>

 <div class="tabs">
     <button class="tab active" data-tab="clients" onclick="showTab('clients')">Clients</button>
+    <button class="tab" data-tab="datasets" onclick="showTab('datasets')">Datasets</button>
     <button class="tab" data-tab="snapshots" onclick="showTab('snapshots')">Snapshots</button>
     <button class="tab" data-tab="admins" onclick="showTab('admins')">Admins</button>
 </div>
@@ -121,6 +122,32 @@ const adminPanelHTML = `<!DOCTYPE html>
         </div>
     </div>

+    <div id="datasets-tab" class="hidden">
+        <div class="card">
+            <div class="card-header">
+                <h3>Datasets</h3>
+                <select id="dataset-client-filter" onchange="loadDatasets()">
+                    <option value="">All Clients</option>
+                </select>
+            </div>
+            <div class="card-body">
+                <table>
+                    <thead>
+                        <tr>
+                            <th>Client</th>
+                            <th>Dataset Name</th>
+                            <th>Storage Type</th>
+                            <th>Status</th>
+                            <th>Snapshots</th>
+                            <th>Actions</th>
+                        </tr>
+                    </thead>
+                    <tbody id="datasets-table"></tbody>
+                </table>
+            </div>
+        </div>
+    </div>
+
     <div id="snapshots-tab" class="hidden">
         <div class="card">
             <div class="card-header">
@@ -134,9 +161,11 @@ const adminPanelHTML = `<!DOCTYPE html>
                     <thead>
                         <tr>
                             <th>Client</th>
+                            <th>Dataset</th>
                             <th>Snapshot ID</th>
                             <th>Timestamp</th>
                             <th>Size</th>
+                            <th>Storage</th>
                             <th>Type</th>
                             <th>Actions</th>
                         </tr>
@@ -474,6 +503,76 @@ const adminPanelHTML = `<!DOCTYPE html>
     }
 }

+// Load datasets
+async function loadDatasets() {
+    const clientId = document.getElementById('dataset-client-filter').value;
+    const url = '/admin/datasets' + (clientId ? '?client_id=' + clientId : '');
+
+    try {
+        const res = await fetch(url);
+        const datasets = await res.json();
+
+        const tbody = document.getElementById('datasets-table');
+        tbody.innerHTML = datasets.map(d =>
+            '<tr>' +
+            '<td>' + d.client_id + '</td>' +
+            '<td><strong>' + d.dataset_name + '</strong></td>' +
+            '<td><span class="badge badge-info">' + d.storage_type + '</span></td>' +
+            '<td>' + (d.enabled ? '<span class="badge badge-success">Enabled</span>' : '<span class="badge badge-danger">Disabled</span>') + '</td>' +
+            '<td>' + (d.snapshot_count || 0) + '</td>' +
+            '<td>' +
+            '<button class="btn btn-sm ' + (d.enabled ? 'btn-danger' : 'btn-success') + '" onclick="toggleDataset(' + d.id + ', ' + !d.enabled + ')">' + (d.enabled ? 'Disable' : 'Enable') + '</button>' +
+            '<button class="btn btn-sm btn-danger" onclick="deleteDataset(' + d.id + ', \'' + d.dataset_name + '\')">Delete</button>' +
+            '</td>' +
+            '</tr>'
+        ).join('');
+
+        // Update client filter if not set
+        if (!clientId) {
+            const clientsRes = await fetch('/admin/clients');
+            const clients = await clientsRes.json();
+            const filter = document.getElementById('dataset-client-filter');
+            filter.innerHTML = '<option value="">All Clients</option>' +
+                clients.map(c => '<option value="' + c.client_id + '">' + c.client_id + '</option>').join('');
+        }
+    } catch (e) {
+        console.error('Failed to load datasets:', e);
+    }
+}
+
+// Toggle dataset enabled/disabled
+async function toggleDataset(id, enabled) {
+    try {
+        const res = await fetch('/admin/datasets/' + id, {
+            method: 'PUT',
+            headers: {'Content-Type': 'application/json'},
+            body: JSON.stringify({enabled: enabled})
+        });
+        if (res.ok) {
+            loadDatasets();
+        } else {
+            alert('Failed to update dataset');
+        }
+    } catch (e) {
+        console.error('Failed to toggle dataset:', e);
+    }
+}
+
+// Delete dataset
+async function deleteDataset(id, name) {
+    if (!confirm('Delete dataset ' + name + '?')) return;
+    try {
+        const res = await fetch('/admin/datasets/' + id, {method: 'DELETE'});
+        if (res.ok) {
+            loadDatasets();
+        } else {
+            alert('Failed to delete dataset');
+        }
+    } catch (e) {
+        console.error('Failed to delete dataset:', e);
+    }
+}
+
 // Load snapshots
 async function loadSnapshots() {
     const clientId = document.getElementById('snapshot-client-filter').value;
@@ -488,12 +587,14 @@ const adminPanelHTML = `<!DOCTYPE html>
         const sizeGB = (s.size_bytes / (1024*1024*1024)).toFixed(2);
         return '<tr>' +
             '<td>' + s.client_id + '</td>' +
+            '<td>' + (s.dataset_name || '-') + '</td>' +
             '<td>' + s.snapshot_id + '</td>' +
             '<td>' + new Date(s.timestamp).toLocaleString() + '</td>' +
             '<td>' + sizeGB + ' GB</td>' +
+            '<td><span class="badge ' + (s.storage_type === 's3' ? 'badge-info' : 'badge-warning') + '">' + s.storage_type + '</span></td>' +
             '<td>' +
-            (s.incremental ? '<span class="badge badge-info">Incremental</span>' : '<span class="badge badge-success">Full</span>') +
-            (s.compressed ? ' <span class="badge badge-info">Compressed</span>' : '') +
+            (s.incremental ? '<span class="badge badge-info">Inc</span>' : '<span class="badge badge-success">Full</span>') +
+            (s.compressed ? ' <span class="badge badge-info">LZ4</span>' : '') +
             '</td>' +
             '<td><button class="btn btn-sm btn-danger" onclick="deleteSnapshot(\'' + s.client_id + '\', \'' + s.snapshot_id + '\')">Delete</button></td>' +
             '</tr>';
@@ -534,12 +635,14 @@ const adminPanelHTML = `<!DOCTYPE html>
     document.querySelector('.tab[data-tab="' + tab + '"]').classList.add('active');

     document.getElementById('clients-tab').classList.add('hidden');
+    document.getElementById('datasets-tab').classList.add('hidden');
     document.getElementById('snapshots-tab').classList.add('hidden');
     document.getElementById('admins-tab').classList.add('hidden');
     document.getElementById(tab + '-tab').classList.remove('hidden');

     if (tab === 'snapshots') loadSnapshots();
     if (tab === 'admins') loadAdmins();
+    if (tab === 'datasets') loadDatasets();
 }

 // Modal functions

@@ -89,6 +89,23 @@ func (d *Database) initTables() error {
         return fmt.Errorf("failed to create clients table: %v", err)
     }

+    // Datasets table - multiple datasets per client
+    _, err = d.db.Exec(`
+        CREATE TABLE IF NOT EXISTS datasets (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            client_id TEXT NOT NULL,
+            dataset_name TEXT NOT NULL,
+            storage_type TEXT NOT NULL DEFAULT 's3',
+            enabled INTEGER NOT NULL DEFAULT 1,
+            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+            FOREIGN KEY (client_id) REFERENCES clients(client_id) ON DELETE CASCADE,
+            UNIQUE(client_id, dataset_name)
+        )
+    `)
+    if err != nil {
+        return fmt.Errorf("failed to create datasets table: %v", err)
+    }
+
     // Snapshots table
     _, err = d.db.Exec(`
         CREATE TABLE IF NOT EXISTS snapshots (

@@ -400,6 +417,162 @@ func (d *Database) CreateDefaultClient() error {
     return d.SaveClient(defaultClient)
 }

+// CreateDefaultDataset creates a default dataset for a client if none exists
+func (d *Database) CreateDefaultDataset(clientID, datasetName string) error {
+    datasets, err := d.GetDatasetsByClient(clientID)
+    if err != nil {
+        return err
+    }
+
+    if len(datasets) > 0 {
+        return nil
+    }
+
+    // Create default dataset
+    dataset := &DatasetConfig{
+        ClientID:    clientID,
+        DatasetName: datasetName,
+        StorageType: "s3",
+        Enabled:     true,
+    }
+
+    return d.SaveDataset(dataset)
+}
+
+// DatasetConfig represents a dataset configuration
+type DatasetConfig struct {
+    ID          int64  `json:"id"`
+    ClientID    string `json:"client_id"`
+    DatasetName string `json:"dataset_name"`
+    StorageType string `json:"storage_type"`
+    Enabled     bool   `json:"enabled"`
+}
+
+// GetDatasetsByClient gets all datasets for a client
+func (d *Database) GetDatasetsByClient(clientID string) ([]*DatasetConfig, error) {
+    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ?`
+
+    rows, err := d.db.Query(query, clientID)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var datasets []*DatasetConfig
+    for rows.Next() {
+        dataset := &DatasetConfig{}
+        var enabled int
+
+        err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
+        if err != nil {
+            return nil, err
+        }
+
+        dataset.Enabled = enabled == 1
+        datasets = append(datasets, dataset)
+    }
+
+    return datasets, nil
+}
+
+// GetDatasetByName gets a dataset by client and dataset name
+func (d *Database) GetDatasetByName(clientID, datasetName string) (*DatasetConfig, error) {
+    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ? AND dataset_name = ?`
+
+    row := d.db.QueryRow(query, clientID, datasetName)
+
+    dataset := &DatasetConfig{}
+    var enabled int
+
+    err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
+    if err == sql.ErrNoRows {
+        return nil, nil
+    }
+    if err != nil {
+        return nil, err
+    }
+
+    dataset.Enabled = enabled == 1
+    return dataset, nil
+}
+
+// SaveDataset saves or updates a dataset
+func (d *Database) SaveDataset(dataset *DatasetConfig) error {
+    enabled := 0
+    if dataset.Enabled {
+        enabled = 1
+    }
+
+    if dataset.ID == 0 {
+        // Insert new
+        _, err := d.db.Exec(`INSERT INTO datasets (client_id, dataset_name, storage_type, enabled) VALUES (?, ?, ?, ?)`,
+            dataset.ClientID, dataset.DatasetName, dataset.StorageType, enabled)
+        return err
+    }
+
+    // Update existing
+    _, err := d.db.Exec(`UPDATE datasets SET storage_type = ?, enabled = ? WHERE id = ?`,
+        dataset.StorageType, enabled, dataset.ID)
+    return err
+}
+
+// DeleteDataset deletes a dataset
+func (d *Database) DeleteDataset(id int64) error {
+    _, err := d.db.Exec(`DELETE FROM datasets WHERE id = ?`, id)
+    return err
+}
+
+// GetDatasetByID gets a dataset by ID
+func (d *Database) GetDatasetByID(id int64) (*DatasetConfig, error) {
+    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE id = ?`
+
+    row := d.db.QueryRow(query, id)
+    dataset := &DatasetConfig{}
+    var enabled int
+
+    err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
+    if err != nil {
+        return nil, err
+    }
+
+    dataset.Enabled = enabled == 1
+    return dataset, nil
+}
+
+// GetSnapshotCountByDataset gets snapshot count for a specific dataset
+func (d *Database) GetSnapshotCountByDataset(clientID, datasetName string) (int, error) {
+    var count int
+    err := d.db.QueryRow(`SELECT COUNT(*) FROM snapshots WHERE client_id = ? AND dataset_name = ?`, clientID, datasetName).Scan(&count)
+    return count, err
+}
+
+// GetAllDatasets gets all datasets
+func (d *Database) GetAllDatasets() ([]*DatasetConfig, error) {
+    query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets`
+
+    rows, err := d.db.Query(query)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var datasets []*DatasetConfig
+    for rows.Next() {
+        dataset := &DatasetConfig{}
+        var enabled int
+
+        err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
+        if err != nil {
+            return nil, err
+        }
+
+        dataset.Enabled = enabled == 1
+        datasets = append(datasets, dataset)
+    }
+
+    return datasets, nil
+}
+
 // GetSnapshotByID retrieves a specific snapshot
 func (d *Database) GetSnapshotByID(clientID, snapshotID string) (*SnapshotMetadata, error) {
     snap := &SnapshotMetadata{}
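
None of the hunks here wire up CreateDefaultDataset; presumably it is meant to be called at startup next to CreateDefaultClient. A minimal sketch of that assumed wiring follows, with a local stand-in struct and interface so it compiles independently of the repo's database package.

    package boot

    import "fmt"

    // DatasetConfig re-declares the struct from the diff so this sketch is
    // self-contained; the real definition lives in the database package.
    type DatasetConfig struct {
        ID          int64
        ClientID    string
        DatasetName string
        StorageType string
        Enabled     bool
    }

    // datasetStore lists just the methods this sketch needs. The *Database type
    // in the diff has methods of this shape, but over its own DatasetConfig
    // type, so the interface here is illustrative only.
    type datasetStore interface {
        CreateDefaultDataset(clientID, datasetName string) error
        GetDatasetsByClient(clientID string) ([]*DatasetConfig, error)
    }

    // seedAndList makes sure a client has at least one dataset row, then prints
    // what it may back up (assumed startup wiring, not shown in the diff).
    func seedAndList(db datasetStore, clientID, defaultDataset string) error {
        if err := db.CreateDefaultDataset(clientID, defaultDataset); err != nil {
            return err
        }
        datasets, err := db.GetDatasetsByClient(clientID)
        if err != nil {
            return err
        }
        for _, d := range datasets {
            fmt.Printf("%s: %s (storage=%s, enabled=%v)\n", d.ClientID, d.DatasetName, d.StorageType, d.Enabled)
        }
        return nil
    }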

@@ -271,6 +271,36 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
         return
     }

+    // Check if dataset is allowed for this client
+    dataset, err := s.db.GetDatasetByName(clientID, datasetName)
+    if err != nil || dataset == nil {
+        // Auto-create dataset if not exists
+        log.Printf("Dataset %s not found for client %s, creating...", datasetName, clientID)
+        newDataset := &DatasetConfig{
+            ClientID:    clientID,
+            DatasetName: datasetName,
+            StorageType: "s3",
+            Enabled:     true,
+        }
+        if err := s.db.SaveDataset(newDataset); err != nil {
+            log.Printf("Error creating dataset: %v", err)
+            respondJSON(w, http.StatusForbidden, UploadResponse{
+                Success: false,
+                Message: "Dataset not configured for this client",
+            })
+            return
+        }
+        dataset = newDataset
+    }
+
+    if !dataset.Enabled {
+        respondJSON(w, http.StatusForbidden, UploadResponse{
+            Success: false,
+            Message: "Dataset is disabled",
+        })
+        return
+    }
+
     ctx := context.Background()

     // Upload to S3
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find snapshot metadata
|
|
||||||
client, err := s.db.GetClient(clientID)
|
|
||||||
if err != nil || client == nil {
|
|
||||||
http.Error(w, "Client not found", http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID)
|
targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID)
|
||||||
if err != nil || targetSnapshot == nil {
|
if err != nil || targetSnapshot == nil {
|
||||||
http.Error(w, "Snapshot not found", http.StatusNotFound)
|
http.Error(w, "Snapshot not found", http.StatusNotFound)
|
||||||
@@ -560,6 +583,8 @@ func (s *Server) RegisterRoutes(mux *http.ServeMux) {
     mux.HandleFunc("/admin/snapshots", s.handleAdminGetSnapshots)
     mux.HandleFunc("/admin/snapshot/delete", s.handleAdminDeleteSnapshot)
     mux.HandleFunc("/admin/stats", s.handleAdminGetStats)
+    mux.HandleFunc("/admin/datasets", s.handleAdminGetDatasets)
+    mux.HandleFunc("/admin/datasets/{id}", s.handleAdminUpdateDeleteDataset)
     mux.HandleFunc("/admin/admins", s.handleAdminGetAdmins)
    mux.HandleFunc("/admin/admin/create", s.handleAdminCreateAdmin)
     mux.HandleFunc("/admin/admin/delete", s.handleAdminDeleteAdmin)