serve s3: let rclone act as an S3 compatible server

Mikubill 2022-09-21 15:09:50 +00:00 committed by Nick Craig-Wood
parent d3ba32c43e
commit 23abac2a59
15 changed files with 1166 additions and 0 deletions

.gitignore (1 line changed)

@@ -17,3 +17,4 @@ __pycache__
.DS_Store
/docs/static/img/logos/
resource_windows_*.syso
.devcontainer

backend/s3/s3.go

@@ -140,6 +140,9 @@ var providerOption = fs.Option{
}, {
Value: "RackCorp",
Help: "RackCorp Object Storage",
}, {
Value: "Rclone",
Help: "Rclone S3 Server",
}, {
Value: "Scaleway",
Help: "Scaleway Object Storage",
@@ -3149,6 +3152,11 @@ func setQuirks(opt *Options) {
virtualHostStyle = false
urlEncodeListings = false
useAlreadyExists = false // untested
case "Rclone":
listObjectsV2 = true
urlEncodeListings = true
virtualHostStyle = false
useMultipartEtag = false
case "Storj":
// Force chunk size to >= 64 MiB
if opt.ChunkSize < 64*fs.Mebi {
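
With the new "Rclone" provider entry, an ordinary rclone S3 remote can point at a serve s3 instance with the right quirks preselected (ListObjectsV2, URL-encoded listings, path-style access, no multipart ETags). A minimal client-side config sketch, mirroring the settings the test further down uses; the remote name serves3 and the endpoint are placeholders:

[serves3]
type = s3
provider = Rclone
endpoint = http://127.0.0.1:8080
list_url_encode = true
access_key_id = ACCESS_KEY_ID
secret_access_key = SECRET_ACCESS_KEY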

cmd/serve/s3/backend.go (new file, 497 lines)

@@ -0,0 +1,497 @@
// Package s3 implements a fake s3 server for rclone
package s3
import (
"context"
"crypto/md5"
"encoding/hex"
"io"
"log"
"os"
"path"
"strings"
"sync"
"github.com/Mikubill/gofakes3"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs"
)
var (
emptyPrefix = &gofakes3.Prefix{}
timeFormat = "Mon, 2 Jan 2006 15:04:05.999999999 GMT"
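// tmpMetaStorage keeps per-object user metadata in memory, keyed by
// object path; the VFS has no place to persist it between requests.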
tmpMetaStorage = new(sync.Map)
)
type s3Backend struct {
opt *Options
lock sync.Mutex
fs *vfs.VFS
}
// newBackend creates a new s3Backend.
func newBackend(fs *vfs.VFS, opt *Options) gofakes3.Backend {
return &s3Backend{
fs: fs,
opt: opt,
}
}
// ListBuckets always returns the default bucket.
func (db *s3Backend) ListBuckets() ([]gofakes3.BucketInfo, error) {
dirEntries, err := getDirEntries("/", db.fs)
if err != nil {
return nil, err
}
var response []gofakes3.BucketInfo
for _, entry := range dirEntries {
if entry.IsDir() {
response = append(response, gofakes3.BucketInfo{
Name: gofakes3.URLEncode(entry.Name()),
CreationDate: gofakes3.NewContentTime(entry.ModTime()),
})
}
// todo: handle files in root dir
}
return response, nil
}
// ListBucket lists the objects in the given bucket.
func (db *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
_, err := db.fs.Stat(bucket)
if err != nil {
return nil, gofakes3.BucketNotFound(bucket)
}
if prefix == nil {
prefix = emptyPrefix
}
db.lock.Lock()
defer db.lock.Unlock()
// workaround: treat a blank prefix or delimiter as absent
if strings.TrimSpace(prefix.Prefix) == "" {
prefix.HasPrefix = false
}
if strings.TrimSpace(prefix.Delimiter) == "" {
prefix.HasDelimiter = false
}
response := gofakes3.NewObjectList()
if db.fs.Fs().Features().BucketBased || prefix.HasDelimiter && prefix.Delimiter != "/" {
err = db.getObjectsListArbitrary(bucket, prefix, response)
} else {
path, remaining := prefixParser(prefix)
err = db.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
}
if err != nil {
return nil, err
}
return db.pager(response, page)
}
// HeadObject returns the fileinfo for the given object name.
//
// Note that metadata support is limited; only values cached in tmpMetaStorage are returned.
func (db *s3Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object, error) {
_, err := db.fs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
node, err := db.fs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
if !node.IsFile() {
return nil, gofakes3.KeyNotFound(objectName)
}
entry := node.DirEntry()
if entry == nil {
return nil, gofakes3.KeyNotFound(objectName)
}
fobj := entry.(fs.Object)
size := node.Size()
hash := getFileHashByte(fobj)
meta := map[string]string{
"Last-Modified": node.ModTime().Format(timeFormat),
"Content-Type": fs.MimeType(context.Background(), fobj),
}
if val, ok := tmpMetaStorage.Load(fp); ok {
metaMap := val.(map[string]string)
for k, v := range metaMap {
meta[k] = v
}
}
return &gofakes3.Object{
Name: objectName,
Hash: hash,
Metadata: meta,
Size: size,
Contents: noOpReadCloser{},
}, nil
}
// GetObject fetches the object from the filesystem.
func (db *s3Backend) GetObject(bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_, err = db.fs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
node, err := db.fs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
if !node.IsFile() {
return nil, gofakes3.KeyNotFound(objectName)
}
entry := node.DirEntry()
if entry == nil {
return nil, gofakes3.KeyNotFound(objectName)
}
fobj := entry.(fs.Object)
file := node.(*vfs.File)
size := node.Size()
hash := getFileHashByte(fobj)
in, err := file.Open(os.O_RDONLY)
if err != nil {
return nil, gofakes3.ErrInternal
}
defer func() {
// If an error occurs, the caller may not have access to Object.Body in order to close it:
if err != nil {
_ = in.Close()
}
}()
var rdr io.ReadCloser = in
rnge, err := rangeRequest.Range(size)
if err != nil {
return nil, err
}
if rnge != nil {
if _, err := in.Seek(rnge.Start, io.SeekStart); err != nil {
return nil, err
}
rdr = limitReadCloser(rdr, in.Close, rnge.Length)
}
meta := map[string]string{
"Last-Modified": node.ModTime().Format(timeFormat),
"Content-Type": fs.MimeType(context.Background(), fobj),
}
if val, ok := tmpMetaStorage.Load(fp); ok {
metaMap := val.(map[string]string)
for k, v := range metaMap {
meta[k] = v
}
}
return &gofakes3.Object{
Name: gofakes3.URLEncode(objectName),
Hash: hash,
Metadata: meta,
Size: size,
Range: rnge,
Contents: rdr,
}, nil
}
// TouchObject creates or updates metadata on the specified object.
func (db *s3Backend) TouchObject(fp string, meta map[string]string) (result gofakes3.PutObjectResult, err error) {
_, err = db.fs.Stat(fp)
if err == vfs.ENOENT {
f, err := db.fs.Create(fp)
if err != nil {
return result, err
}
_ = f.Close()
return db.TouchObject(fp, meta)
} else if err != nil {
return result, err
}
_, err = db.fs.Stat(fp)
if err != nil {
return result, err
}
tmpMetaStorage.Store(fp, meta)
if val, ok := meta["X-Amz-Meta-Mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
if val, ok := meta["mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
return result, nil
}
// PutObject creates or overwrites the object with the given name.
func (db *s3Backend) PutObject(
bucketName, objectName string,
meta map[string]string,
input io.Reader, size int64,
) (result gofakes3.PutObjectResult, err error) {
_, err = db.fs.Stat(bucketName)
if err != nil {
return result, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
objectDir := path.Dir(fp)
// _, err = db.fs.Stat(objectDir)
// if err == vfs.ENOENT {
// fs.Errorf(objectDir, "PutObject failed: path not found")
// return result, gofakes3.KeyNotFound(objectName)
// }
if objectDir != "." {
if err := mkdirRecursive(objectDir, db.fs); err != nil {
return result, err
}
}
if size == 0 {
// maybe a touch operation
return db.TouchObject(fp, meta)
}
f, err := db.fs.Create(fp)
if err != nil {
return result, err
}
hasher := md5.New()
w := io.MultiWriter(f, hasher)
if _, err := io.Copy(w, input); err != nil {
// remove the file if an i/o error occurred (FsPutErr)
_ = f.Close()
_ = db.fs.Remove(fp)
return result, err
}
if err := f.Close(); err != nil {
// remove the file if a close error occurred (FsPutErr)
_ = db.fs.Remove(fp)
return result, err
}
_, err = db.fs.Stat(fp)
if err != nil {
return result, err
}
tmpMetaStorage.Store(fp, meta)
if val, ok := meta["X-Amz-Meta-Mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
if val, ok := meta["mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
return result, nil
}
// DeleteMulti deletes multiple objects in a single request.
func (db *s3Backend) DeleteMulti(bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, rerr error) {
db.lock.Lock()
defer db.lock.Unlock()
for _, object := range objects {
if err := db.deleteObjectLocked(bucketName, object); err != nil {
log.Println("delete object failed:", err)
result.Error = append(result.Error, gofakes3.ErrorResult{
Code: gofakes3.ErrInternal,
Message: gofakes3.ErrInternal.Message(),
Key: object,
})
} else {
result.Deleted = append(result.Deleted, gofakes3.ObjectID{
Key: object,
})
}
}
return result, nil
}
// DeleteObject deletes the object with the given name.
func (db *s3Backend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
db.lock.Lock()
defer db.lock.Unlock()
return result, db.deleteObjectLocked(bucketName, objectName)
}
// deleteObjectLocked deletes the object from the filesystem.
func (db *s3Backend) deleteObjectLocked(bucketName, objectName string) error {
_, err := db.fs.Stat(bucketName)
if err != nil {
return gofakes3.BucketNotFound(bucketName)
}
fp := path.Join(bucketName, objectName)
// S3 does not report an error when attempting to delete a key that does not exist, so
// we need to skip IsNotExist errors.
if err := db.fs.Remove(fp); err != nil && !os.IsNotExist(err) {
return err
}
// fixme: unsafe operation
if db.fs.Fs().Features().CanHaveEmptyDirectories {
rmdirRecursive(fp, db.fs)
}
return nil
}
// CreateBucket creates a new bucket.
func (db *s3Backend) CreateBucket(name string) error {
_, err := db.fs.Stat(name)
if err != nil && err != vfs.ENOENT {
return gofakes3.ErrInternal
}
if err == nil {
return gofakes3.ErrBucketAlreadyExists
}
if err := db.fs.Mkdir(name, 0755); err != nil {
return gofakes3.ErrInternal
}
return nil
}
// DeleteBucket deletes the bucket with the given name.
func (db *s3Backend) DeleteBucket(name string) error {
_, err := db.fs.Stat(name)
if err != nil {
return gofakes3.BucketNotFound(name)
}
if err := db.fs.Remove(name); err != nil {
return gofakes3.ErrBucketNotEmpty
}
return nil
}
// BucketExists checks if the bucket exists.
func (db *s3Backend) BucketExists(name string) (exists bool, err error) {
_, err = db.fs.Stat(name)
if err != nil {
return false, nil
}
return true, nil
}
// CopyObject copies the specified object from srcKey to dstKey.
func (db *s3Backend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
fp := path.Join(srcBucket, srcKey)
if srcBucket == dstBucket && srcKey == dstKey {
tmpMetaStorage.Store(fp, meta)
val, ok := meta["X-Amz-Meta-Mtime"]
if !ok {
if val, ok = meta["mtime"]; !ok {
return
}
}
// update modtime
ti, err := swift.FloatStringToTime(val)
if err != nil {
return result, nil
}
return result, db.fs.Chtimes(fp, ti, ti)
}
cStat, err := db.fs.Stat(fp)
if err != nil {
return
}
c, err := db.GetObject(srcBucket, srcKey, nil)
if err != nil {
return
}
defer func() {
_ = c.Contents.Close()
}()
for k, v := range c.Metadata {
if _, found := meta[k]; !found && k != "X-Amz-Acl" {
meta[k] = v
}
}
if _, ok := meta["mtime"]; !ok {
meta["mtime"] = swift.TimeToFloatString(cStat.ModTime())
}
_, err = db.PutObject(dstBucket, dstKey, meta, c.Contents, c.Size)
if err != nil {
return
}
return gofakes3.CopyObjectResult{
ETag: `"` + hex.EncodeToString(c.Hash) + `"`,
LastModified: gofakes3.NewContentTime(cStat.ModTime()),
}, nil
}

cmd/serve/s3/help.go (new file, 45 lines)

@@ -0,0 +1,45 @@
package s3
var longHelp = `
Serve s3 implements a basic s3 server that serves a remote
via s3. This can be viewed with an s3 client, or you can make
an s3 type remote to read and write to it.
The S3 server supports Signature Version 4 authentication. Just
use ` + `--s3-authkey accessKey1,secretKey1` + ` and
set the Authorization header correctly in the request. (See
https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
Please note that some clients may require HTTPS endpoints.
See [#SSL](#ssl-tls) for SSL configuration.
Use ` + `--force-path-style=false` + ` if you want to use the bucket name as
part of the hostname (such as mybucket.local)
Use ` + `--etag-hash` + ` if you want to change the hash provider.
Limitations
serve s3 will treat all directories at depth 1 in the root as buckets and
ignore files at that depth. You can use CreateBucket to create
folders under the root, but you can't create empty folders under other folders.
When using PutObject or DeleteObject, rclone will automatically create
or clean up empty folders according to the object's prefix. If you don't want
empty folders to be cleaned up automatically, use ` + `--no-cleanup` + `.
When using ListObjects, rclone will use ` + `/` + ` when the delimiter is empty.
This reduces backend requests with no effect on most operations, but if
the delimiter is something other than slash or empty, rclone will do a
full recursive search of the backend, which may take some time.
serve s3 currently supports the following operations.
Bucket-level operations
ListBuckets, CreateBucket, DeleteBucket
Object-level operations
HeadObject, ListObjects, GetObject, PutObject, DeleteObject, DeleteObjects,
CreateMultipartUpload, CompleteMultipartUpload, AbortMultipartUpload,
CopyObject, UploadPart
Other operations will return an Unimplemented error.
`
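
As a usage sketch (the keys, remote name and bucket below are placeholders), the server is started with an authkey pair and then addressed through a client remote such as the one sketched after the provider hunk above:

rclone serve s3 --s3-authkey ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
rclone lsf serves3:mybucket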

cmd/serve/s3/ioutils.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package s3
import "io"
type noOpReadCloser struct{}
type readerWithCloser struct {
io.Reader
closer func() error
}
var _ io.ReadCloser = &readerWithCloser{}
func (d noOpReadCloser) Read(b []byte) (n int, err error) {
return 0, io.EOF
}
func (d noOpReadCloser) Close() error {
return nil
}
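// limitReadCloser returns an io.ReadCloser which reads at most sz bytes
// from rdr and calls closer on Close; GetObject uses it for range requests.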
func limitReadCloser(rdr io.Reader, closer func() error, sz int64) io.ReadCloser {
return &readerWithCloser{
Reader: io.LimitReader(rdr, sz),
closer: closer,
}
}
func (rwc *readerWithCloser) Close() error {
if rwc.closer != nil {
return rwc.closer()
}
return nil
}

cmd/serve/s3/list.go (new file, 86 lines)

@@ -0,0 +1,86 @@
package s3
import (
"context"
"path"
"strings"
"github.com/Mikubill/gofakes3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
)
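// entryListR recursively lists the directory at bucket/fdPath, adding
// entries whose names start with name to response; when acceptComPrefix
// is set, directories are reported as common prefixes instead of being
// descended into.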
func (db *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool, response *gofakes3.ObjectList) error {
fp := path.Join(bucket, fdPath)
dirEntries, err := getDirEntries(fp, db.fs)
if err != nil {
return err
}
for _, entry := range dirEntries {
object := entry.Name()
// workaround for control-chars detection
objectPath := path.Join(fdPath, object)
if !strings.HasPrefix(object, name) {
continue
}
if entry.IsDir() {
if acceptComPrefix {
response.AddPrefix(gofakes3.URLEncode(objectPath))
continue
}
err := db.entryListR(bucket, path.Join(fdPath, object), "", false, response)
if err != nil {
return err
}
} else {
item := &gofakes3.Content{
Key: gofakes3.URLEncode(objectPath),
LastModified: gofakes3.NewContentTime(entry.ModTime()),
ETag: getFileHash(entry),
Size: entry.Size(),
StorageClass: gofakes3.StorageStandard,
}
response.Add(item)
}
}
return nil
}
// getObjectsListArbitrary lists the objects in the given bucket by walking the backend directly.
func (db *s3Backend) getObjectsListArbitrary(bucket string, prefix *gofakes3.Prefix, response *gofakes3.ObjectList) error {
// ignore error - vfs may have uncommitted updates, such as new dir etc.
_ = walk.ListR(context.Background(), db.fs.Fs(), bucket, false, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, entry := range entries {
entry := entry.(fs.Object)
objName := entry.Remote()
object := strings.TrimPrefix(objName, bucket)[1:]
var matchResult gofakes3.PrefixMatch
if prefix.Match(object, &matchResult) {
if matchResult.CommonPrefix {
response.AddPrefix(gofakes3.URLEncode(object))
continue
}
item := &gofakes3.Content{
Key: gofakes3.URLEncode(object),
LastModified: gofakes3.NewContentTime(entry.ModTime(context.Background())),
ETag: getFileHash(entry),
Size: entry.Size(),
StorageClass: gofakes3.StorageStandard,
}
response.Add(item)
}
}
return nil
})
return nil
}

cmd/serve/s3/logger.go (new file, 26 lines)

@@ -0,0 +1,26 @@
package s3
import (
"fmt"
"github.com/Mikubill/gofakes3"
"github.com/rclone/rclone/fs"
)
// logger forwards gofakes3 log messages to the rclone log
type logger struct{}
// Print logs a message at the mapped rclone log level
func (l logger) Print(level gofakes3.LogLevel, v ...interface{}) {
// fs.Infof(nil, fmt.Sprintln(v...))
switch level {
case gofakes3.LogErr:
fs.Errorf(nil, fmt.Sprintln(v...))
case gofakes3.LogWarn:
fs.Infof(nil, fmt.Sprintln(v...))
case gofakes3.LogInfo:
fs.Debugf(nil, fmt.Sprintln(v...))
default:
panic("unknown level")
}
}

cmd/serve/s3/pager.go (new file, 66 lines)

@@ -0,0 +1,66 @@
// Package s3 implements a fake s3 server for rclone
package s3
import (
"sort"
"github.com/Mikubill/gofakes3"
)
// pager splits the object list into multiple pages.
func (db *s3Backend) pager(list *gofakes3.ObjectList, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
// sort common prefixes alphabetically
sort.Slice(list.CommonPrefixes, func(i, j int) bool {
return list.CommonPrefixes[i].Prefix < list.CommonPrefixes[j].Prefix
})
// sort by modtime
sort.Slice(list.Contents, func(i, j int) bool {
return list.Contents[i].LastModified.Before(list.Contents[j].LastModified.Time)
})
tokens := page.MaxKeys
if tokens == 0 {
tokens = 1000
}
if page.HasMarker {
for i, obj := range list.Contents {
if obj.Key == page.Marker {
list.Contents = list.Contents[i+1:]
break
}
}
for i, obj := range list.CommonPrefixes {
if obj.Prefix == page.Marker {
list.CommonPrefixes = list.CommonPrefixes[i+1:]
break
}
}
}
response := gofakes3.NewObjectList()
for _, obj := range list.CommonPrefixes {
if tokens <= 0 {
break
}
response.AddPrefix(obj.Prefix)
tokens--
}
for _, obj := range list.Contents {
if tokens <= 0 {
break
}
response.Add(obj)
tokens--
}
if len(list.CommonPrefixes)+len(list.Contents) > int(page.MaxKeys) {
response.IsTruncated = true
if len(response.Contents) > 0 {
response.NextMarker = response.Contents[len(response.Contents)-1].Key
} else {
response.NextMarker = response.CommonPrefixes[len(response.CommonPrefixes)-1].Prefix
}
}
return response, nil
}
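
A quick sketch of the paging behaviour (not part of the commit; it only uses the types seen above and assumes the time package is imported): with two objects and MaxKeys=1 the first page is truncated and carries a marker for the follow-up request.

db := &s3Backend{}
list := gofakes3.NewObjectList()
now := time.Now()
list.Add(&gofakes3.Content{Key: "a", LastModified: gofakes3.NewContentTime(now)})
list.Add(&gofakes3.Content{Key: "b", LastModified: gofakes3.NewContentTime(now.Add(time.Second))})
page, _ := db.pager(list, gofakes3.ListBucketPage{MaxKeys: 1})
// page.Contents now holds only "a"; page.IsTruncated is true and
// page.NextMarker is "a", ready to be sent back as the next Marker.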

cmd/serve/s3/s3.go (new file, 70 lines)

@@ -0,0 +1,70 @@
package s3
import (
"context"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
pathBucketMode: true,
hashName: "MD5",
hashType: hash.MD5,
noCleanup: false,
HTTP: httplib.DefaultCfg(),
}
// Opt is options set by command line flags
var Opt = DefaultOpt
const flagPrefix = ""
func init() {
flagSet := Command.Flags()
httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
vfsflags.AddFlags(flagSet)
flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access; if false use virtual hosted style (default true)")
flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off")
flags.StringArrayVarP(flagSet, &Opt.authPair, "s3-authkey", "", Opt.authPair, "Set key pair for v4 authorization, split by comma")
flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Don't clean up empty folders after an object is deleted")
}
// Command definition for cobra
var Command = &cobra.Command{
Use: "s3 remote:path",
Short: `Serve remote:path over s3.`,
Long: strings.ReplaceAll(longHelp, "|", "`") + httplib.Help(flagPrefix) + vfs.Help,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
if Opt.hashName == "auto" {
Opt.hashType = f.Hashes().GetOne()
} else if Opt.hashName != "" {
err := Opt.hashType.Set(Opt.hashName)
if err != nil {
return err
}
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt)
if err != nil {
return err
}
router := s.Router()
s.Bind(router)
s.Serve()
s.Wait()
return nil
})
return nil
},
}

cmd/serve/s3/s3_test.go (new file, 136 lines)

@@ -0,0 +1,136 @@
// Serve s3 tests set up a server and run the integration tests
// for the s3 remote against it.
package s3
import (
"context"
"encoding/hex"
"fmt"
"math/rand"
"os"
"os/exec"
"strings"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
httplib "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
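// Port 0 makes the kernel pick a free port for the test server.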
endpoint = "localhost:0"
)
// TestS3 runs the s3 server then runs the unit tests for the
// s3 remote against it.
func TestS3(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
keyid := RandString(16)
keysec := RandString(16)
serveropt := &Options{
HTTP: httplib.DefaultCfg(),
pathBucketMode: true,
hashName: "",
hashType: hash.None,
authPair: []string{fmt.Sprintf("%s,%s", keyid, keysec)},
}
serveropt.HTTP.ListenAddr = []string{endpoint}
w, err := newServer(context.Background(), f, serveropt)
require.NoError(t, err)
router := w.Router()
w.Bind(router)
w.Serve()
testURL := w.Server.URLs()[0]
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "s3",
"provider": "Rclone",
"endpoint": testURL,
"list_url_encode": "true",
"access_key_id": keyid,
"secret_access_key": keysec,
}
return config, func() {}
}
Run(t, "s3", start)
}
func RandString(n int) string {
src := rand.New(rand.NewSource(time.Now().UnixNano()))
b := make([]byte, (n+1)/2)
if _, err := src.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)[:n]
}
func Run(t *testing.T, name string, start servetest.StartFn) {
fstest.Initialise()
ci := fs.GetConfig(context.Background())
ci.DisableFeatures = append(ci.DisableFeatures, "Metadata")
fremote, _, clean, err := fstest.RandomRemote()
assert.NoError(t, err)
defer clean()
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
f := fremote
config, cleanup := start(f)
defer cleanup()
// Change directory to run the tests
cwd, err := os.Getwd()
require.NoError(t, err)
err = os.Chdir("../../../backend/" + name)
require.NoError(t, err, "failed to cd to "+name+" backend")
defer func() {
// Change back to the old directory
require.NoError(t, os.Chdir(cwd))
}()
// Run the backend tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
if *fstest.Verbose {
args = append(args, "-verbose")
}
remoteName := name + "test:"
args = append(args, "-remote", remoteName)
args = append(args, "-run", "^TestIntegration$")
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
cmd := exec.Command("go", args...)
// Configure the backend with environment variables
cmd.Env = os.Environ()
prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
for k, v := range config {
cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
}
// Run the test
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running "+name+" integration tests")
}

cmd/serve/s3/server.go (new file, 72 lines)

@@ -0,0 +1,72 @@
// Package s3 implements a fake s3 server for rclone
package s3
import (
"context"
"fmt"
"math/rand"
"net/http"
"github.com/Mikubill/gofakes3"
"github.com/go-chi/chi/v5"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
// Options contains options for the s3 server
type Options struct {
//TODO add more options
pathBucketMode bool
hashName string
hashType hash.Type
authPair []string
noCleanup bool
HTTP httplib.Config
}
// Server wraps the http server and the gofakes3 fake S3 handler
type Server struct {
*httplib.Server
f fs.Fs
vfs *vfs.VFS
faker *gofakes3.GoFakeS3
handler http.Handler
ctx context.Context // for global config
}
// newServer makes a new S3 Server to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error) {
w := &Server{
f: f,
ctx: ctx,
vfs: vfs.New(f, &vfsflags.Opt),
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w.vfs, opt),
gofakes3.WithHostBucket(!opt.pathBucketMode),
gofakes3.WithLogger(newLogger),
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
)
w.Server, err = httplib.NewServer(ctx,
httplib.WithConfig(opt.HTTP),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}
w.handler = w.faker.Server()
return w, nil
}
// Bind registers the handler with the given router
func (w *Server) Bind(router chi.Router) {
router.Handle("/*", w.handler)
}

cmd/serve/s3/utils.go (new file, 113 lines)

@@ -0,0 +1,113 @@
package s3
import (
"context"
"encoding/hex"
"fmt"
"path"
"strings"
"github.com/Mikubill/gofakes3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs"
)
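// getDirEntries returns the entries of the directory at prefix, mapping
// a missing or non-directory node to gofakes3.ErrNoSuchKey.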
func getDirEntries(prefix string, fs *vfs.VFS) (vfs.Nodes, error) {
node, err := fs.Stat(prefix)
if err == vfs.ENOENT {
return nil, gofakes3.ErrNoSuchKey
} else if err != nil {
return nil, err
}
if !node.IsDir() {
return nil, gofakes3.ErrNoSuchKey
}
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
return nil, err
}
return dirEntries, nil
}
func getFileHashByte(node interface{}) []byte {
b, err := hex.DecodeString(getFileHash(node))
if err != nil {
return nil
}
return b
}
func getFileHash(node interface{}) string {
var o fs.Object
switch b := node.(type) {
case vfs.Node:
o = b.DirEntry().(fs.Object)
case fs.DirEntry:
o = b.(fs.Object)
}
hash, err := o.Hash(context.Background(), Opt.hashType)
if err != nil {
return ""
}
return hash
}
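// prefixParser splits an S3 prefix at its last "/" into a directory path
// and a remaining name fragment, e.g. "a/b/c" -> ("a/b", "c").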
func prefixParser(p *gofakes3.Prefix) (path, remaining string) {
idx := strings.LastIndexByte(p.Prefix, '/')
if idx < 0 {
return "", p.Prefix
}
return p.Prefix[:idx], p.Prefix[idx+1:]
}
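// mkdirRecursive creates any missing directories along path on the VFS,
// much like os.MkdirAll.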
func mkdirRecursive(path string, fs *vfs.VFS) error {
path = strings.Trim(path, "/")
dirs := strings.Split(path, "/")
dir := ""
for _, d := range dirs {
dir += "/" + d
if _, err := fs.Stat(dir); err != nil {
err := fs.Mkdir(dir, 0777)
if err != nil {
return err
}
}
}
return nil
}
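// rmdirRecursive removes the parents of p one level at a time while they
// are empty, stopping before it would touch the bucket (root) itself.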
func rmdirRecursive(p string, fs *vfs.VFS) {
dir := path.Dir(p)
if !strings.ContainsAny(dir, "/\\") {
// might be the bucket (root)
return
}
if _, err := fs.Stat(dir); err == nil {
err := fs.Remove(dir)
if err != nil {
return
}
rmdirRecursive(dir, fs)
}
}
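// authlistResolver turns "accessKey,secretKey" pairs from the command
// line into the map that gofakes3's V4 auth expects.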
func authlistResolver(list []string) map[string]string {
authList := make(map[string]string)
for _, v := range list {
pair := strings.Split(v, ",")
if len(pair) != 2 {
fs.Infof(nil, "Ignored: invalid auth pair %s", v)
continue
}
authList[pair[0]] = pair[1]
}
return authList
}

cmd/serve/serve.go

@@ -11,6 +11,7 @@ import (
"github.com/rclone/rclone/cmd/serve/http"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/cmd/serve/restic"
"github.com/rclone/rclone/cmd/serve/s3"
"github.com/rclone/rclone/cmd/serve/sftp"
"github.com/rclone/rclone/cmd/serve/webdav"
"github.com/spf13/cobra"
@@ -39,6 +40,9 @@ func init() {
if nfs.Command != nil {
Command.AddCommand(nfs.Command)
}
if s3.Command != nil {
Command.AddCommand(s3.Command)
}
cmd.Root.AddCommand(Command)
}

go.mod (3 lines changed)

@@ -9,6 +9,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
github.com/Mikubill/gofakes3 v0.0.3-0.20221030004050-725f2cf2bf5e
github.com/Unknwon/goconfig v1.0.0
github.com/a8m/tree v0.0.0-20230208161321-36ae24ddad15
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
@@ -147,6 +148,8 @@ require (
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect
github.com/relvacode/iso8601 v1.3.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sony/gobreaker v0.5.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect

go.sum (5 lines changed)

@@ -58,6 +58,8 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIf
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Mikubill/gofakes3 v0.0.3-0.20221030004050-725f2cf2bf5e h1:gtBhC9D1R/uuoov9wO8IDx3E25Tqn8nW7xRTvgPDP2E=
github.com/Mikubill/gofakes3 v0.0.3-0.20221030004050-725f2cf2bf5e/go.mod h1:OSXqXEGUe9CmPiwLMMnVrbXonMf4BeLBkBdLufxxiyY=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
@@ -456,6 +458,7 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E=
github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA=
@@ -776,6 +779,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -806,6 +810,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=