
fstest: factor chunked streaming tests from b2 and use in all backends

Nick Craig-Wood 2023-11-24 12:58:40 +00:00
parent fabeb8e44e
commit cc2a4c2e20
2 changed files with 30 additions and 59 deletions
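
For orientation, the essence of the test being factored out of b2 and into fstest is an upload whose size is declared as -1, which forces the backend down its chunked streaming path. The sketch below restates that pattern as a standalone helper; the package and helper name are illustrative, not part of rclone, and it assumes the backend implements PutStream.

package mybackend // illustrative package name, not part of rclone

import (
	"bytes"
	"context"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// checkStreamedUpload (illustrative name) uploads contents with a declared
// size of -1 so the backend must stream it in chunks, then verifies the
// resulting object's size and contents.
func checkStreamedUpload(ctx context.Context, t *testing.T, f fs.Fs, remote, contents string) {
	src := object.NewStaticObjectInfo(remote, time.Now(), -1, true, nil, f)
	in := bytes.NewBufferString(contents)

	dst, err := f.Features().PutStream(ctx, in, src)
	require.NoError(t, err)
	defer func() { assert.NoError(t, dst.Remove(ctx)) }()

	assert.Equal(t, int64(len(contents)), dst.Size())
	assert.Equal(t, contents, fstests.ReadObject(ctx, t, dst, -1), "Contents incorrect")
}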

View File

@@ -1,19 +1,11 @@
package b2
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
@@ -178,56 +170,9 @@ func TestParseTimeString(t *testing.T) {
}
// The integration tests do a reasonable job of testing the normal
// streaming upload but don't test the chunked streaming upload.
func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
ctx := context.Background()
contents := random.String(size)
item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
// Set chunk size to minimum value so we make chunks
origOpt := f.opt
f.opt.ChunkSize = minChunkSize
f.opt.UploadCutoff = 0
defer func() {
f.opt = origOpt
}()
// Do the streaming upload
src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
in := bytes.NewBufferString(contents)
dst, err := f.PutStream(ctx, in, src)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, int64(size), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.Equal(t, srcModTime, dstModTime)
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents, "Contents incorrect")
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
for _, size := range []fs.SizeSuffix{
minChunkSize - 1,
minChunkSize,
minChunkSize + 1,
(3 * minChunkSize) / 2,
(5 * minChunkSize) / 2,
} {
t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
f.InternalTestChunkedStreamingUpload(t, int(size))
})
}
// Internal tests go here
}
var _ fstests.InternalTester = (*Fs)(nil)
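
The trailing var _ fstests.InternalTester = (*Fs)(nil) line is a compile-time check that *Fs still satisfies the hook the generic suite uses to discover backend-specific tests. A minimal sketch of that hook, assuming it is the single-method interface implied by the usage above (not copied from the rclone source):

package fstests // illustrative placement

import "testing"

// InternalTester is the opt-in hook: a backend that implements it gets its
// InternalTest method run from the generic integration tests.
type InternalTester interface {
	InternalTest(t *testing.T)
}

Assigning (*Fs)(nil) to the blank identifier costs nothing at runtime; it only makes the compiler verify that *Fs implements the interface.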

View File

@@ -230,8 +230,10 @@ func testPutMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.It
return contents, PutTestContentsMetadata(ctx, t, f, file, contents, true, mimeType, metadata)
}
// TestPutLarge puts file to the remote, checks it and removes it on success.
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
// testPutLarge puts file to the remote, checks it and removes it on success.
//
// If stream is set, then it uploads the file with size -1
func testPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, stream bool) {
var (
err error
obj fs.Object
@@ -242,7 +244,11 @@ func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item)
uploadHash = hash.NewMultiHasher()
in := io.TeeReader(r, uploadHash)
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
size := file.Size
if stream {
size = -1
}
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, size, true, nil, nil)
obj, err = f.Put(ctx, in, obji)
if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
t.Skip("Can't upload zero length files")
@@ -270,6 +276,16 @@ func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item)
require.NoError(t, obj.Remove(ctx))
}
// TestPutLarge puts file to the remote, checks it and removes it on success.
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
testPutLarge(ctx, t, f, file, false)
}
// TestPutLargeStreamed puts file of unknown size to the remote, checks it and removes it on success.
func TestPutLargeStreamed(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
testPutLarge(ctx, t, f, file, true)
}
// ReadObject reads the contents of an object as a string
func ReadObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
@@ -2097,6 +2113,16 @@ func Run(t *testing.T, opt *Opt) {
Path: fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
Size: int64(fileSize),
})
t.Run("Streamed", func(t *testing.T) {
if f.Features().PutStream == nil {
t.Skip("FS has no PutStream interface")
}
TestPutLargeStreamed(ctx, t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunked-%s-%s-streamed.bin", cs.String(), fileSize.String()),
Size: int64(fileSize),
})
})
})
}
})
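
With the hunk above, any backend whose integration test delegates to fstests.Run, and which advertises PutStream in its features, now gets the Streamed variant of the chunked upload tests without backend-specific code. A hedged sketch of that wiring follows; the backend package and remote name are purely illustrative, and real backends set further Opt fields (nil object, chunked upload limits, and so on).

package mybackend // illustrative backend package

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration hands the backend to the generic suite; the new Streamed
// subtest is skipped automatically when the backend has no PutStream.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestMyBackend:", // illustrative remote name
	})
}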