Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion .github/workflows/unit-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,4 +54,11 @@ jobs:
run: |
export CGO_ENABLED=0
go version
go run github.com/onsi/ginkgo/v2/ginkgo --skip-package=integration ./s3/...
go run github.com/onsi/ginkgo/v2/ginkgo --skip-package=integration ./s3/...

- name: common package unit tests
run: |
export CGO_ENABLED=0
go version
go run github.com/onsi/ginkgo/v2/ginkgo ./common/...

10 changes: 9 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@ storage-cli -s <provider> -c <config-file> <command> [arguments]
- `-s`: Storage provider type (azurebs|s3|gcs|alioss|dav)
- `-c`: Path to provider-specific configuration file
- `-v`: Show version
- `-log-file`: Path to log file (optional, logs to stderr by default)
- `-log-level`: Logging level: debug, info, warn, error (default: warn)

**Common commands:**
- `put <path/to/file> <remote-object>` - Upload a local file to remote storage
Expand All @@ -57,7 +59,7 @@ storage-cli -s <provider> -c <config-file> <command> [arguments]
- `copy <source-object> <destination-object>` - Copy object within the same storage
- `sign <object> <action> <duration_as_second>` - Generate signed URL (action: get|put, duration: e.g., 60s)
- `properties <remote-object>` - Display properties/metadata of a remote object
- `ensure-storage-exists` - Ensure the storage container/bucket exists
- `ensure-storage-exists` - Ensure the storage container/bucket exists, creating it (bucket, container, etc.) if it does not

**Examples:**
```shell
Expand All @@ -75,6 +77,12 @@ storage-cli -s azurebs -c azure-config.json properties my-blob.txt

# Sign object for 'get' in alioss for 60 seconds
storage-cli -s alioss -c alioss-config.json sign object.txt get 60s

# Upload file with debug logging to file
storage-cli -s s3 -c s3-config.json -log-level debug -log-file storage.log put local-file.txt remote-object.txt

# List objects with error-level logging only
storage-cli -s gcs -c gcs-config.json -log-level error list my-prefix
```

## Contributing
Expand Down
3 changes: 0 additions & 3 deletions alioss/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"encoding/base64"
"fmt"
"io"
"log"
"os"
"strings"
"time"
Expand All @@ -29,8 +28,6 @@ func (client *AliBlobstore) Put(sourceFilePath string, destinationObject string)
if err != nil {
return fmt.Errorf("upload failure: %w", err)
}

log.Println("Successfully uploaded file")
return nil
}

Expand Down
120 changes: 49 additions & 71 deletions alioss/client/storage_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,14 @@ import (
"encoding/json"
"errors"
"fmt"
"log"
"log/slog"
"strconv"
"strings"
"time"

"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/cloudfoundry/storage-cli/alioss/config"
"github.com/cloudfoundry/storage-cli/common"
)

//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . StorageClient
Expand Down Expand Up @@ -74,14 +75,20 @@ func NewStorageClient(storageConfig config.AliStorageConfig) (StorageClient, err
}, nil
}

func (dsc DefaultStorageClient) Upload(
sourceFilePath string,
sourceFileMD5 string,
destinationObject string,
) error {
log.Printf("Uploading %s/%s\n", dsc.storageConfig.BucketName, destinationObject)
// newOSSClient constructs an Alibaba Cloud OSS client for the given endpoint
// and credentials. When debug logging is enabled (via common.IsDebug), the
// OSS SDK's internal logging is bridged into the process-wide slog handler
// at debug level so SDK traces appear in the same structured log stream.
func newOSSClient(endpoint, accessKeyID, accessKeySecret string) (*oss.Client, error) {
	if common.IsDebug() {
		// slog.NewLogLogger adapts the slog handler to the *log.Logger
		// interface the OSS SDK expects.
		ossLogger := slog.NewLogLogger(slog.Default().Handler(), slog.LevelDebug)
		return oss.New(endpoint, accessKeyID, accessKeySecret, oss.SetLogLevel(oss.Debug), oss.SetLogger(ossLogger))
	}
	return oss.New(endpoint, accessKeyID, accessKeySecret)
}

func (dsc DefaultStorageClient) Upload(sourceFilePath string, sourceFileMD5 string, destinationObject string) error {
slog.Info("Uploading object to OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", destinationObject, "file_path", sourceFilePath)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand All @@ -94,13 +101,10 @@ func (dsc DefaultStorageClient) Upload(
return bucket.PutObjectFromFile(destinationObject, sourceFilePath, oss.ContentMD5(sourceFileMD5))
}

func (dsc DefaultStorageClient) Download(
sourceObject string,
destinationFilePath string,
) error {
log.Printf("Downloading %s/%s\n", dsc.storageConfig.BucketName, sourceObject)
func (dsc DefaultStorageClient) Download(sourceObject string, destinationFilePath string) error {
slog.Info("Downloading object from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", sourceObject, "file_path", destinationFilePath)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand All @@ -113,15 +117,12 @@ func (dsc DefaultStorageClient) Download(
return bucket.GetObjectToFile(sourceObject, destinationFilePath)
}

func (dsc DefaultStorageClient) Copy(
sourceObject string,
destinationObject string,
) error {
log.Printf("Copying object from %s to %s", sourceObject, destinationObject)
func (dsc DefaultStorageClient) Copy(sourceObject string, destinationObject string) error {
slog.Info("copying object within OSS bucket", "bucket", dsc.storageConfig.BucketName, "source_object", sourceObject, "destination_object", destinationObject)
srcOut := fmt.Sprintf("%s/%s", dsc.storageConfig.BucketName, sourceObject)
destOut := fmt.Sprintf("%s/%s", dsc.storageConfig.BucketName, destinationObject)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand All @@ -138,12 +139,10 @@ func (dsc DefaultStorageClient) Copy(
return nil
}

func (dsc DefaultStorageClient) Delete(
object string,
) error {
log.Printf("Deleting %s/%s\n", dsc.storageConfig.BucketName, object)
func (dsc DefaultStorageClient) Delete(object string) error {
slog.Info("Deleting object from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand All @@ -156,21 +155,14 @@ func (dsc DefaultStorageClient) Delete(
return bucket.DeleteObject(object)
}

func (dsc DefaultStorageClient) DeleteRecursive(
prefix string,
) error {
func (dsc DefaultStorageClient) DeleteRecursive(prefix string) error {
if prefix != "" {
log.Printf("Deleting all objects in bucket %s with prefix '%s'\n",
dsc.storageConfig.BucketName, prefix)
slog.Info("Deleting all objects with prefix from OSS bucket", "bucket", dsc.storageConfig.BucketName, "prefix", prefix)
} else {
log.Printf("Deleting all objects in bucket %s\n", dsc.storageConfig.BucketName)
slog.Info("Deleting all objects from OSS bucket", "bucket", dsc.storageConfig.BucketName)
}

client, err := oss.New(
dsc.storageConfig.Endpoint,
dsc.storageConfig.AccessKeyID,
dsc.storageConfig.AccessKeySecret,
)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand Down Expand Up @@ -225,9 +217,9 @@ func (dsc DefaultStorageClient) DeleteRecursive(
}

func (dsc DefaultStorageClient) Exists(object string) (bool, error) {
log.Printf("Checking if blob: %s/%s\n", dsc.storageConfig.BucketName, object)
slog.Info("Checking if object exists in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return false, err
}
Expand All @@ -243,22 +235,18 @@ func (dsc DefaultStorageClient) Exists(object string) (bool, error) {
}

if objectExists {
log.Printf("File '%s' exists in bucket '%s'\n", object, dsc.storageConfig.BucketName)
slog.Info("Object exists in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object)
return true, nil
} else {
log.Printf("File '%s' does not exist in bucket '%s'\n", object, dsc.storageConfig.BucketName)
slog.Info("Object does not exist in OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object)
return false, nil
}
}

func (dsc DefaultStorageClient) SignedUrlPut(
object string,
expiredInSec int64,
) (string, error) {

log.Printf("Getting signed PUT url for blob %s/%s\n", dsc.storageConfig.BucketName, object)
func (dsc DefaultStorageClient) SignedUrlPut(object string, expiredInSec int64) (string, error) {
slog.Info("Generating signed PUT URL for OSS object", "bucket", dsc.storageConfig.BucketName, "object_key", object, "expiration_seconds", expiredInSec)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return "", err
}
Expand All @@ -271,14 +259,10 @@ func (dsc DefaultStorageClient) SignedUrlPut(
return bucket.SignURL(object, oss.HTTPPut, expiredInSec)
}

func (dsc DefaultStorageClient) SignedUrlGet(
object string,
expiredInSec int64,
) (string, error) {

log.Printf("Getting signed GET url for blob %s/%s\n", dsc.storageConfig.BucketName, object)
func (dsc DefaultStorageClient) SignedUrlGet(object string, expiredInSec int64) (string, error) {
slog.Info("Generating signed GET URL for OSS object", "bucket", dsc.storageConfig.BucketName, "object_key", object, "expiration_seconds", expiredInSec)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return "", err
}
Expand All @@ -291,14 +275,11 @@ func (dsc DefaultStorageClient) SignedUrlGet(
return bucket.SignURL(object, oss.HTTPGet, expiredInSec)
}

func (dsc DefaultStorageClient) List(
prefix string,
) ([]string, error) {
func (dsc DefaultStorageClient) List(prefix string) ([]string, error) {
if prefix != "" {
log.Printf("Listing objects in bucket %s with prefix '%s'\n",
dsc.storageConfig.BucketName, prefix)
slog.Info("Listing all objects in OSS bucket with prefix", "bucket", dsc.storageConfig.BucketName, "prefix", prefix)
} else {
log.Printf("Listing objects in bucket %s\n", dsc.storageConfig.BucketName)
slog.Info("Listing all objects in OSS bucket", "bucket", dsc.storageConfig.BucketName)
}

var (
Expand All @@ -315,7 +296,7 @@ func (dsc DefaultStorageClient) List(
opts = append(opts, oss.Marker(marker))
}

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -349,13 +330,10 @@ type BlobProperties struct {
ContentLength int64 `json:"content_length,omitempty"`
}

func (dsc DefaultStorageClient) Properties(
object string,
) error {
log.Printf("Getting properties for object %s/%s\n",
dsc.storageConfig.BucketName, object)
func (dsc DefaultStorageClient) Properties(object string) error {
slog.Info("Getting object properties from OSS bucket", "bucket", dsc.storageConfig.BucketName, "object_key", object)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand Down Expand Up @@ -415,9 +393,9 @@ func (dsc DefaultStorageClient) Properties(
}

func (dsc DefaultStorageClient) EnsureBucketExists() error {
log.Printf("Ensuring bucket '%s' exists\n", dsc.storageConfig.BucketName)
slog.Info("Ensuring OSS bucket exists", "bucket", dsc.storageConfig.BucketName)

client, err := oss.New(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
client, err := newOSSClient(dsc.storageConfig.Endpoint, dsc.storageConfig.AccessKeyID, dsc.storageConfig.AccessKeySecret)
if err != nil {
return err
}
Expand All @@ -428,14 +406,14 @@ func (dsc DefaultStorageClient) EnsureBucketExists() error {
}

if exists {
log.Printf("Bucket '%s' already exists\n", dsc.storageConfig.BucketName)
slog.Info("OSS bucket already exists", "bucket", dsc.storageConfig.BucketName)
return nil
}

if err := client.CreateBucket(dsc.storageConfig.BucketName); err != nil {
return fmt.Errorf("failed to create bucket '%s': %w", dsc.storageConfig.BucketName, err)
}

log.Printf("Bucket '%s' created successfully\n", dsc.storageConfig.BucketName)
slog.Info("OSS bucket created successfully", "bucket", dsc.storageConfig.BucketName)
return nil
}
4 changes: 2 additions & 2 deletions alioss/integration/general_ali_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)

var _ = Describe("General testing for all Ali regions", func() {
Expand Down Expand Up @@ -50,8 +51,7 @@ var _ = Describe("General testing for all Ali regions", func() {
cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName)
Expect(err).ToNot(HaveOccurred())
Expect(cliSession.ExitCode()).To(BeZero())

Expect(string(cliSession.Err.Contents())).To(MatchRegexp("File '" + blobName + "' exists in bucket '" + bucketName + "'"))
Expect(cliSession.Err).Should(gbytes.Say(`"msg":"Object exists in OSS bucket"`))
})

It("overwrites an existing file", func() {
Expand Down
2 changes: 2 additions & 0 deletions alioss/integration/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ func RunCli(cliPath string, configPath string, storageType string, subcommand st
configPath,
"-s",
storageType,
"-log-level",
"info",
subcommand,
}
cmdArgs = append(cmdArgs, args...)
Expand Down
14 changes: 7 additions & 7 deletions azurebs/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import (
"crypto/md5"
"fmt"
"io"
"log"
"log/slog"
"os"
"strings"
"time"
Expand Down Expand Up @@ -37,24 +37,24 @@ func (client *AzBlobstore) Put(sourceFilePath string, dest string) error {
}

if !bytes.Equal(sourceMD5, md5) {
log.Println("The upload failed because of an MD5 inconsistency. Triggering blob deletion ...")
slog.Error("Upload failed due to MD5 mismatch, deleting blob", "blob", dest, "expected_md5", fmt.Sprintf("%x", sourceMD5), "received_md5", fmt.Sprintf("%x", md5))

err := client.storageClient.Delete(dest)
if err != nil {
log.Println(fmt.Errorf("blob deletion failed: %w", err))
}
slog.Error("Failed to delete blob after MD5 mismatch", "blob", dest, "error", err)

return fmt.Errorf("the upload responded an MD5 %v does not match the source file MD5 %v", md5, sourceMD5)
}
return fmt.Errorf("MD5 mismatch: expected %x, got %x", sourceMD5, md5)
}

log.Println("Successfully uploaded file")
slog.Debug("MD5 verification passed", "blob", dest, "md5", fmt.Sprintf("%x", md5))
return nil
}

func (client *AzBlobstore) Get(source string, dest string) error {
dstFile, err := os.Create(dest)
if err != nil {
log.Fatalln(err)
return fmt.Errorf("failed to create destination file: %w", err)
}
defer dstFile.Close() //nolint:errcheck

Expand Down
2 changes: 1 addition & 1 deletion azurebs/client/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ var _ = Describe("Client", func() {
file, _ := os.CreateTemp("", "tmpfile") //nolint:errcheck

putError := azBlobstore.Put(file.Name(), "target/blob")
Expect(putError.Error()).To(Equal("the upload responded an MD5 [1 2 3] does not match the source file MD5 [212 29 140 217 143 0 178 4 233 128 9 152 236 248 66 126]"))
Expect(putError.Error()).To(Equal("MD5 mismatch: expected d41d8cd98f00b204e9800998ecf8427e, got 010203"))

Expect(storageClient.UploadCallCount()).To(Equal(1))
source, dest := storageClient.UploadArgsForCall(0)
Expand Down
Loading
Loading