remove s3 from client

2026-02-15 11:41:05 +01:00
parent 5892ac2a2e
commit 8b592db3dd
10 changed files with 291 additions and 582 deletions


@@ -7,12 +7,10 @@ import (
 	"log"
 	"net/http"
 	"os/exec"
 	"strings"
 	"time"

-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/config"
-	"github.com/aws/aws-sdk-go-v2/credentials"
-	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/mistifyio/go-zfs"
 )
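Note: the import swap above implies a dependency change as well. Assuming the project uses Go modules (this part of the diff is not shown in the commit), the corresponding steps would be something like:

    go get github.com/minio/minio-go/v7
    go mod tidy

which adds the minio-go requirement and drops the now-unused aws-sdk-go-v2 modules from go.mod.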
@@ -25,119 +23,92 @@ type StorageBackend interface {
 	GetSize(ctx context.Context, key string) (int64, error)
 }

-// S3Backend implements StorageBackend for S3-compatible storage using AWS SDK v2
+// S3Backend implements StorageBackend for S3-compatible storage using minio-go
 type S3Backend struct {
-	client     *s3.Client
+	client     *minio.Client
 	bucketName string
 }

-// NewS3Backend creates a new S3 storage backend
-func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool, region string) (*S3Backend, error) {
-	// Ensure endpoint has valid URI scheme
-	if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
-		if useSSL {
-			endpoint = "https://" + endpoint
-		} else {
-			endpoint = "http://" + endpoint
-		}
-	}
-
-	// Determine if using custom endpoint (non-AWS)
-	customEndpoint := endpoint != "" && endpoint != "https://s3.amazonaws.com" && endpoint != "http://s3.amazonaws.com"
-
-	// Load AWS config
-	awsCfg, err := config.LoadDefaultConfig(context.Background(),
-		config.WithRegion(region),
-		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to load AWS config: %v", err)
-	}
-
-	// Create S3 client
-	s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
-		if customEndpoint {
-			o.BaseEndpoint = aws.String(endpoint)
-			o.UsePathStyle = true // Required for MinIO and other S3-compatible storage
-		}
-		// Set HTTP client with extended timeout for large uploads
-		o.HTTPClient = &http.Client{
-			Timeout: 0, // No timeout for large file uploads
-		}
-	})
+// NewS3Backend creates a new S3 storage backend using minio-go
+func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
+	// Create custom HTTP transport with extended timeouts for large file uploads
+	transport := &http.Transport{
+		Proxy:           http.ProxyFromEnvironment,
+		TLSClientConfig: nil,
+		IdleConnTimeout: 90 * time.Second,
+		// Connection pooling
+		MaxIdleConns:        10,
+		MaxIdleConnsPerHost: 10,
+	}
+
+	client, err := minio.New(endpoint, &minio.Options{
+		Creds:     credentials.NewStaticV4(accessKey, secretKey, ""),
+		Secure:    useSSL,
+		Transport: transport,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create S3 client: %v", err)
+	}

-	// Check if bucket exists (or create it for AWS S3)
+	// Ensure bucket exists
 	ctx := context.Background()
-	_, err = s3Client.HeadBucket(ctx, &s3.HeadBucketInput{
-		Bucket: aws.String(bucketName),
-	})
+	exists, err := client.BucketExists(ctx, bucketName)
 	if err != nil {
-		// Try to create bucket
-		_, err = s3Client.CreateBucket(ctx, &s3.CreateBucketInput{
-			Bucket: aws.String(bucketName),
-		})
-		if err != nil {
-			log.Printf("Warning: failed to create bucket: %v", err)
-		} else {
-			log.Printf("Created S3 bucket: %s", bucketName)
-		}
+		return nil, fmt.Errorf("failed to check bucket: %v", err)
+	}
+	if !exists {
+		err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to create bucket: %v", err)
+		}
+		log.Printf("Created S3 bucket: %s", bucketName)
 	}

 	return &S3Backend{
-		client:     s3Client,
+		client:     client,
 		bucketName: bucketName,
 	}, nil
 }
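One behavioral difference worth noting: minio.New expects a bare host[:port] endpoint and derives the scheme from the Secure option, which is why the old scheme-prefixing logic (and the region parameter) could be dropped. A minimal sketch of a call site, with all values made up for illustration:

    // Sketch only: endpoint, credentials, and bucket name are placeholders.
    backend, err := NewS3Backend("minio.internal:9000", "ACCESS_KEY", "SECRET_KEY", "zfs-backups", false)
    if err != nil {
        log.Fatalf("init storage backend: %v", err)
    }
    _ = backend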
-// Upload uploads data to S3
+// Upload uploads data to S3 using minio-go
 func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
-	_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
-		Bucket:      aws.String(s.bucketName),
-		Key:         aws.String(key),
-		Body:        data,
-		ContentType: aws.String("application/octet-stream"),
-	})
+	_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
+		minio.PutObjectOptions{
+			ContentType: "application/octet-stream",
+			PartSize:    10 * 1024 * 1024, // 10MB parts
+		})
 	return err
 }
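With a known size, minio-go splits the body into the configured 10MB multipart chunks; per minio-go's documented behavior, PutObject also accepts size = -1 for readers of unknown length, falling back to streaming multipart at the cost of larger internal buffering. A sketch of a caller streaming a file with its real size (path and object key are hypothetical):

    // Sketch: upload a local file; path and key are examples only.
    f, err := os.Open("/var/backups/tank@daily.zfs")
    if err != nil {
        return err
    }
    defer f.Close()
    fi, err := f.Stat()
    if err != nil {
        return err
    }
    // Passing the real size lets minio-go pick exact part boundaries.
    return backend.Upload(ctx, "snapshots/tank@daily", f, fi.Size())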
 // Download retrieves data from S3
 func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
-	resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
-		Bucket: aws.String(s.bucketName),
-		Key:    aws.String(key),
-	})
+	obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
 	if err != nil {
 		return nil, err
 	}
-	return resp.Body, nil
+	return obj, nil
 }
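A minio-go gotcha carries over here: GetObject returns without contacting the server, so a missing key typically surfaces on the first Read of the returned object rather than from Download itself. If eager validation were wanted, a variant could Stat the object before handing the reader out (a sketch, not what this commit does):

    // Sketch: fail fast on missing objects before returning the reader.
    obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
    if err != nil {
        return nil, err
    }
    if _, err := obj.Stat(); err != nil {
        obj.Close()
        return nil, err // e.g. the key does not exist
    }
    return obj, nil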
 // Delete removes an object from S3
 func (s *S3Backend) Delete(ctx context.Context, key string) error {
-	_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
-		Bucket: aws.String(s.bucketName),
-		Key:    aws.String(key),
-	})
-	return err
+	return s.client.RemoveObject(ctx, s.bucketName, key, minio.RemoveObjectOptions{})
 }
 // List returns all objects with the given prefix
 func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
 	var keys []string
-	paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
-		Bucket: aws.String(s.bucketName),
-		Prefix: aws.String(prefix),
+	objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{
+		Prefix:    prefix,
+		Recursive: true,
 	})
-	for paginator.HasMorePages() {
-		page, err := paginator.NextPage(ctx)
-		if err != nil {
-			return nil, err
-		}
-		for _, obj := range page.Contents {
-			keys = append(keys, *obj.Key)
-		}
+	for object := range objectCh {
+		if object.Err != nil {
+			return nil, object.Err
+		}
+		keys = append(keys, object.Key)
 	}
 	return keys, nil
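Unlike the paginator loop it replaces, ListObjects streams results over a channel, stops when ctx is cancelled, and reports errors in-band via ObjectInfo.Err, which the loop above checks on every item. A hypothetical caller pairing List with GetSize (prefix is an example):

    // Sketch: enumerate snapshot objects and print their sizes.
    keys, err := backend.List(ctx, "snapshots/")
    if err != nil {
        return err
    }
    for _, k := range keys {
        size, err := backend.GetSize(ctx, k)
        if err != nil {
            return err
        }
        fmt.Printf("%s\t%d bytes\n", k, size)
    }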
@@ -145,14 +116,11 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {

 // GetSize returns the size of an object in S3
 func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
-	info, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
-		Bucket: aws.String(s.bucketName),
-		Key:    aws.String(key),
-	})
+	info, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{})
 	if err != nil {
 		return 0, err
 	}
-	return *info.ContentLength, nil
+	return info.Size, nil
 }

 // LocalBackend implements StorageBackend for local ZFS storage
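Since the migration rewrites S3Backend's internals without touching the StorageBackend interface, a cheap guard against future signature drift would be compile-time assertions for both backends (a suggestion, not part of this commit):

    // Compile-time checks that both backends satisfy StorageBackend.
    var _ StorageBackend = (*S3Backend)(nil)
    var _ StorageBackend = (*LocalBackend)(nil)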