fix
.env | 10
@@ -3,7 +3,7 @@
# ===========================================

# S3 Configuration (Server)
S3_ENABLED=true
S3_ENABLED=false
S3_ENDPOINT=localhost:9000
S3_ACCESS_KEY=minioadmin
S3_SECRET_KEY=minioadmin
@@ -22,8 +22,10 @@ PORT=8080
# Client Configuration
# ===========================================
CLIENT_ID=client1
API_KEY=fcf730b6d95236ecd3c9fc2d92d7b6b2bb061514961aec041d6c7a7192f592e4
# NOTE: Use the RAW API key here, not the hashed version!
# The server stores the hash in clients.json, client sends raw key
API_KEY=secret123
SERVER_URL=http://localhost:8080
LOCAL_DATASET=zfs/data
LOCAL_DATASET=volume/test
COMPRESS=true
STORAGE_TYPE=s3
STORAGE_TYPE=local

@@ -5,7 +5,7 @@
    "max_size_bytes": 107374182400,
    "dataset": "zfs/client1",
    "enabled": true,
    "storage_type": "s3",
    "storage_type": "local",
    "rotation_policy": {
        "keep_hourly": 24,
        "keep_daily": 7,

@@ -31,17 +31,21 @@ func main() {

    localBackend := server.NewLocalBackend(cfg.BaseDataset)

    // Create metadata directory if needed
    // Create metadata directory if needed (only if path contains a directory)
    if idx := len(cfg.MetadataFile) - 1; idx > 0 {
    dir := cfg.MetadataFile
    if idx := len(dir) - 1; idx > 0 {
        foundSlash := false
        for i := len(dir) - 1; i >= 0; i-- {
            if dir[i] == '/' {
                dir = dir[:i]
                foundSlash = true
                break
            }
        }
        if foundSlash && dir != "" {
            os.MkdirAll(dir, 0755)
        }
    }

    srv := server.New(cfg.ConfigFile, cfg.MetadataFile, s3Backend, localBackend)

@@ -134,9 +134,11 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
    gzWriter := gzip.NewWriter(pw)

    go func() {
        defer pw.Close()
        defer gzWriter.Close()
        // Copy zfs output to gzip writer
        io.Copy(gzWriter, zfsOut)
        // Close gzip writer first to flush footer, then close pipe
        gzWriter.Close()
        pw.Close()
    }()

    reader = pr

@@ -10,6 +10,7 @@ import (
    "fmt"
    "io"
    "net/http"
    "os"
    "os/exec"
    "sort"
    "strings"
@@ -186,7 +187,11 @@ func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
    fmt.Printf(" Type: %s\n", uploadMethod)
    fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)

    // Choose upload method based on server response
    if uploadResp.UploadMethod == "s3" {
        return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
    }
    return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}

// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
@@ -222,9 +227,11 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
    gzWriter := gzip.NewWriter(pw)

    go func() {
        defer pw.Close()
        defer gzWriter.Close()
        // Copy zfs output to gzip writer
        io.Copy(gzWriter, zfsOut)
        // Close gzip writer first to flush footer, then close pipe
        gzWriter.Close()
        pw.Close()
    }()

    reader = pr
@@ -289,6 +296,50 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
    return nil
}

// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
    fmt.Printf("-> Sending via ZFS send/receive...\n")

    // Extract server host from URL
    serverHost := c.config.ServerURL
    if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
        serverHost = serverHost[7:]
    } else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
        serverHost = serverHost[8:]
    }

    // Remove port if present
    if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
        serverHost = serverHost[:idx]
    }

    // Build zfs send command
    var zfsSendCmd string
    if base != "" {
        // Incremental send
        fmt.Printf(" Base: %s\n", base)
        zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
    } else {
        // Full send
        zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
    }

    // Execute ZFS send over SSH
    cmd := exec.Command("sh", "-c",
        fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))

    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to send snapshot: %v", err)
    }

    fmt.Printf("Snapshot sent successfully!\n")
    return nil
}

// RotateLocalSnapshots removes old snapshots based on the retention policy.
// This is similar to zfs-auto-snapshot's rotation behavior.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {

@@ -289,7 +289,8 @@ func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {

    timestamp := time.Now().Format("2006-01-02_15:04:05")

    if client.StorageType == "s3" {
    // Check if S3 backend is available for S3 storage type
    if client.StorageType == "s3" && s.s3Backend != nil {
        // S3 upload
        storageKey := fmt.Sprintf("%s/%s_%s.zfs", req.ClientID, req.DatasetName, timestamp)
        if req.Compressed {
@@ -303,6 +304,12 @@ func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
            StorageKey: storageKey,
            UploadURL: fmt.Sprintf("/upload-stream/%s", req.ClientID),
        })
    } else if client.StorageType == "s3" && s.s3Backend == nil {
        // S3 requested but not configured
        respondJSON(w, http.StatusInternalServerError, UploadResponse{
            Success: false,
            Message: "S3 storage requested but S3 backend is not configured on server",
        })
    } else {
        // Local ZFS receive
        snapshotName := fmt.Sprintf("%s@%s_%s", client.Dataset, req.ClientID, timestamp)
@@ -346,9 +353,17 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
    ctx := context.Background()

    // Upload to S3
    // When using chunked transfer (io.Pipe), ContentLength is -1
    // MinIO requires -1 for unknown size to use streaming upload
    size := r.ContentLength
    if size < 0 {
        size = 0
        size = -1 // Use streaming upload for unknown size
    }

    if s.s3Backend == nil {
        log.Printf("Error: S3 backend not initialized")
        http.Error(w, "S3 backend not configured", http.StatusInternalServerError)
        return
    }

    if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {

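The `size = -1` change above relies on how the MinIO Go SDK handles an unknown object size: passing `-1` to `PutObject` makes it stream the body as a multipart upload instead of trying to buffer it. A minimal sketch of what the backend's `Upload` could look like, assuming it wraps the minio-go v7 client (package, type, and field names here are illustrative, not the project's actual code):

```go
package storage

import (
    "context"
    "io"

    "github.com/minio/minio-go/v7"
)

// S3Backend is an illustrative wrapper around the MinIO client.
type S3Backend struct {
    client *minio.Client
    bucket string
}

// Upload streams body to S3-compatible storage. A size of -1 tells minio-go
// that the length is unknown, so it falls back to a streaming multipart
// upload, which is what the chunked io.Pipe transfer above needs.
func (b *S3Backend) Upload(ctx context.Context, key string, body io.Reader, size int64) error {
    _, err := b.client.PutObject(ctx, b.bucket, key, body, size, minio.PutObjectOptions{})
    return err
}
```
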
readme.md | 59
@@ -93,6 +93,8 @@ COMPRESS=true
STORAGE_TYPE=s3
```

> **Important**: The `API_KEY` in the client `.env` file must be the **raw (unhashed)** key. The server stores the SHA-256 hash in `clients.json`, and the client sends the raw key which the server then hashes for comparison. For example, if `clients.json` has `api_key: "fcf730b6d95236ecd3c9fc2d92d7b6b2bb061514961aec041d6c7a7192f592e4"` (hash of "secret123"), the client `.env` should have `API_KEY=secret123`.
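
For reference, the hashed value that goes into `clients.json` can be generated from the raw key with a few lines of Go, assuming the server applies a plain, unsalted SHA-256 as described above (a minimal sketch, not part of the shipped tooling):

```go
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "os"
)

func main() {
    // Hash the raw API key the way the server is described to:
    // unsalted SHA-256, hex-encoded.
    raw := "secret123"
    if len(os.Args) > 1 {
        raw = os.Args[1]
    }
    sum := sha256.Sum256([]byte(raw))
    // Put this hex string into the client's api_key field in clients.json.
    fmt.Println(hex.EncodeToString(sum[:]))
}
```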

### Restore Tool Configuration

```env
@@ -265,6 +267,63 @@ S3_BUCKET=zfs-snapshots
S3_USE_SSL=false
```

#### Setting Up MinIO Locally

**Option A: Using Docker (Recommended)**

```bash
# Create a directory for MinIO data
mkdir -p ~/minio-data

# Run MinIO container
docker run -d \
  --name minio \
  -p 9000:9000 \
  -p 9001:9001 \
  -v ~/minio-data:/data \
  -e MINIO_ROOT_USER=minioadmin \
  -e MINIO_ROOT_PASSWORD=minioadmin \
  minio/minio server /data --console-address ":9001"
```

**Option B: Using Binary**

```bash
# Download MinIO
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
sudo mv minio /usr/local/bin/

# Create data directory
mkdir -p ~/minio-data

# Start MinIO
MINIO_ROOT_USER=minioadmin MINIO_ROOT_PASSWORD=minioadmin \
  minio server ~/minio-data --console-address ":9001"
```

**Create the Bucket**

After starting MinIO, create the bucket using the MinIO Client (mc) or web console:

```bash
# Install MinIO Client (mc)
wget https://dl.min.io/client/mc/release/linux-amd64/mc
chmod +x mc
sudo mv mc /usr/local/bin/

# Configure alias to local MinIO
mc alias set local http://localhost:9000 minioadmin minioadmin

# Create bucket
mc mb local/zfs-snapshots

# Verify bucket was created
mc ls local
```

Alternatively, access the MinIO Web Console at http://localhost:9001 and create the bucket through the UI (login: `minioadmin` / `minioadmin`).

### Backblaze B2

```env