Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions storagesystem/rclone.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)

var logger = log.Logger("storage")
Expand Down Expand Up @@ -53,10 +54,19 @@ func (h RCloneHandler) Write(ctx context.Context, path string, in io.Reader) (fs
}

func (h RCloneHandler) Move(ctx context.Context, from fs.Object, to string) (fs.Object, error) {
if h.fs.Features().Move != nil {
return h.fs.Features().Move(ctx, from, to)
// Use rclone's operations.Move which handles all fallback logic:
// - Uses server-side Move if available
// - Falls back to server-side Copy+Delete if Move not supported
// - Falls back to download+upload+delete as last resort
logger.Debugf("Moving %s to %s", from.Remote(), to)

// operations.Move expects the destination fs and remote path
newObj, err := operations.Move(ctx, h.fs, nil, to, from)
if err != nil {
return nil, errors.Wrapf(err, "failed to move %s to %s", from.Remote(), to)
}
return nil, errors.Wrapf(ErrMoveNotSupported, "backend: %s", h.fs.String())

return newObj, nil
}

func (h RCloneHandler) Remove(ctx context.Context, obj fs.Object) error {
Expand Down
110 changes: 110 additions & 0 deletions storagesystem/s3_features_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
package storagesystem

import (
"context"
"testing"

"github.com/data-preservation-programs/singularity/model"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
_ "github.com/rclone/rclone/backend/s3" // Import S3 backend
)

func TestS3Features(t *testing.T) {
// Create a test S3 configuration
testConfig := map[string]string{
"provider": "AWS",
"region": "us-east-1",
"access_key_id": "test",
"secret_access_key": "test",
"endpoint": "http://localhost:9000", // MinIO or localstack endpoint
"chunk_size": "5Mi", // Required for S3
"upload_cutoff": "5Mi",
"copy_cutoff": "5Mi",
"disable_checksum": "true",
"force_path_style": "true",
}

registry, err := fs.Find("s3")
if err != nil {
t.Fatalf("Failed to find S3 backend: %v", err)
}

ctx := context.Background()
ctx, _ = fs.AddConfig(ctx)

// Create S3 filesystem
s3fs, err := registry.NewFs(ctx, "s3", "test-bucket", configmap.Simple(testConfig))
if err != nil {
// This might fail if no S3 endpoint is available, which is OK for feature inspection
t.Logf("Warning: Failed to create S3 filesystem (this is OK if no S3 endpoint is available): %v", err)
// Continue anyway to inspect features
}

if s3fs != nil {
features := s3fs.Features()
t.Logf("S3 Backend Features:")
t.Logf(" Name: %s", s3fs.Name())
t.Logf(" Root: %s", s3fs.Root())
t.Logf(" String: %s", s3fs.String())
t.Logf(" Precision: %s", s3fs.Precision())
t.Logf(" Hashes: %v", s3fs.Hashes())

t.Logf("\nFeature Support:")
t.Logf(" Move: %v", features.Move != nil)
t.Logf(" Copy: %v", features.Copy != nil)
t.Logf(" DirMove: %v", features.DirMove != nil)
t.Logf(" Purge: %v", features.Purge != nil)
t.Logf(" PutStream: %v", features.PutStream != nil)
t.Logf(" About: %v", features.About != nil)
t.Logf(" ServerSideAcrossConfigs: %v", features.ServerSideAcrossConfigs)
t.Logf(" CleanUp: %v", features.CleanUp != nil)
t.Logf(" ListR: %v", features.ListR != nil)
t.Logf(" SetTier: %v", features.SetTier)
t.Logf(" GetTier: %v", features.GetTier)

if features.Move == nil {
t.Logf("\nNote: S3 backend does not expose Move feature")
}
if features.Copy != nil {
t.Logf("Note: S3 backend does expose Copy feature")
}
}
}

// TestS3MoveImplementation builds an RCloneHandler over a minimal S3 storage
// config and logs whether the resulting filesystem advertises the Move/Copy
// features. It returns early (without failing) when S3 is not configured.
func TestS3MoveImplementation(t *testing.T) {
	ctx := context.Background()

	// Minimal S3 storage config for constructing the handler.
	cfg := model.Storage{
		Type: "s3",
		Path: "test-bucket",
		Config: map[string]string{
			"provider":          "AWS",
			"region":            "us-east-1",
			"access_key_id":     "test",
			"secret_access_key": "test",
		},
	}

	h, err := NewRCloneHandler(ctx, cfg)
	if err != nil {
		t.Logf("Could not create RCloneHandler for S3: %v", err)
		t.Logf("This is expected if S3 is not configured")
		return
	}

	if h.fs == nil {
		return
	}

	// Inspect the optional features exposed by the underlying filesystem.
	features := h.fs.Features()
	t.Logf("\nRCloneHandler S3 Features via NewRCloneHandler:")
	t.Logf("  Move available: %v", features.Move != nil)
	t.Logf("  Copy available: %v", features.Copy != nil)

	if features.Move == nil {
		t.Logf("\nConfirmed: S3 Move feature is NOT available through RCloneHandler")
		t.Logf("This will cause ErrMoveNotSupported and files will keep UUID names")
	}
}
78 changes: 78 additions & 0 deletions storagesystem/s3_move_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
package storagesystem

import (
"context"
"strings"
"testing"

"github.com/data-preservation-programs/singularity/model"
"github.com/orlangure/gnomock"
"github.com/orlangure/gnomock/preset/localstack"
"github.com/stretchr/testify/require"
)

// TestS3MoveWithCopyDelete is an integration test: it spins up a localstack
// S3 container, writes an object under a UUID-style name, moves it to a
// piece-CID name via RCloneHandler.Move, and verifies the rename took effect
// (new name present, old name gone). Skipped when Docker is unavailable.
func TestS3MoveWithCopyDelete(t *testing.T) {
	// Skip if Docker not available
	p := localstack.Preset(
		localstack.WithServices(localstack.S3),
	)
	localS3, err := gnomock.Start(p)
	if err != nil && strings.HasPrefix(err.Error(), "can't start container") {
		t.Skip("Docker required for S3 tests")
	}
	require.NoError(t, err)
	defer func() { _ = gnomock.Stop(localS3) }()

	ctx := context.Background()
	bucketName := "test-bucket"

	// Create S3 storage config pointed at the localstack endpoint.
	storage := model.Storage{
		Type: "s3",
		Path: bucketName,
		Config: map[string]string{
			"provider":          "Other",
			"region":            "us-east-1",
			"access_key_id":     "test",
			"secret_access_key": "test",
			"endpoint":          "http://" + localS3.Address(localstack.APIPort),
			"force_path_style":  "true",
			"chunk_size":        "5Mi",
			"upload_cutoff":     "5Mi",
			"copy_cutoff":       "5Mi",
		},
	}

	handler, err := NewRCloneHandler(ctx, storage)
	require.NoError(t, err)

	// Write a test file with UUID name. Keep the content as a string to
	// avoid a needless []byte -> string round-trip when building the reader.
	testContent := "test content for move"
	uuidName := "12345678-1234-1234-1234-123456789abc.car"

	obj, err := handler.Write(ctx, uuidName, strings.NewReader(testContent))
	require.NoError(t, err)
	require.Equal(t, uuidName, obj.Remote())

	// Now try to move it to piece CID name
	pieceCidName := "baga6ea4seaqmk65y3wzeg277zsfvad5ovnpmlcnch6avl2nltop4m4suxft2moa.car"

	newObj, err := handler.Move(ctx, obj, pieceCidName)
	require.NoError(t, err, "Move should succeed using Copy+Delete fallback")
	require.NotNil(t, newObj)
	require.Equal(t, pieceCidName, newObj.Remote(), "New object should have piece CID name")

	// Verify the original UUID file is gone by listing the bucket root.
	entries := handler.Scan(ctx, "")
	var foundFiles []string
	for entry := range entries {
		if entry.Info != nil {
			foundFiles = append(foundFiles, entry.Info.Remote())
		}
	}

	require.Contains(t, foundFiles, pieceCidName, "Should find piece CID file")
	require.NotContains(t, foundFiles, uuidName, "UUID file should be deleted")

	t.Logf("Successfully moved %s to %s using Copy+Delete", uuidName, pieceCidName)
}