fstest: calculate hashes for uploaded test files to fix minio integration tests

Before this change we didn't calculate any hashes for test files
created in the Run framework.

This means that files were uploaded to S3 without a `Content-MD5`
header.  This in turn caused minio to disengage `--compat` mode, which
caused the `TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime` test
in `fs/sync` to fail.

After this change we supply all hashes supported by the destination Fs
on the upload object.

This means that the `Content-MD5` is set and minio engages `--compat`
mode to fix the problem.  Using `--compat` on the command line also
fixes the problem.

This much more closely replicates how objects are actually uploaded with
operations.Copy, so it should improve the integration tests.
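
For illustration, the change in the diff below boils down to hashing the test
content with every hash type the destination Fs supports and attaching the
resulting sums to the ObjectInfo passed to Put, so that S3-like backends can
send a `Content-MD5` header. A minimal sketch of that pattern outside the Run
framework might look like the following; the putTestObject helper is
hypothetical and assumes an fs.Fs has already been created:

package example

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
)

// putTestObject uploads content to f, supplying every hash type the backend
// supports so that S3-like backends can set a Content-MD5 header.
func putTestObject(ctx context.Context, f fs.Fs, remote, content string, modTime time.Time) (fs.Object, error) {
	// Calculate all hashes f supports for the content.
	hasher, err := hash.NewMultiHasherTypes(f.Hashes())
	if err != nil {
		return nil, fmt.Errorf("failed to make multi hasher: %w", err)
	}
	if _, err := hasher.Write([]byte(content)); err != nil {
		return nil, fmt.Errorf("failed to hash content: %w", err)
	}
	// Attach the sums to the ObjectInfo so Put can set e.g. Content-MD5.
	objInfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, hasher.Sums(), nil)
	return f.Put(ctx, bytes.NewBufferString(content), objInfo)
}

operations.Copy passes the source object (and therefore its hashes) as the
ObjectInfo when copying between remotes, which is why supplying the hashes
here brings the test uploads closer to real transfers.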
Nick Craig-Wood 2019-09-16 10:59:01 +01:00
parent b8b12a4000
commit 66347aff2a

@@ -41,6 +41,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/stretchr/testify/assert"
@@ -252,10 +253,22 @@ func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string
 		}
 	}
 	r.Mkdir(ctx, f)
+	// calculate all hashes f supports for content
+	hash, err := hash.NewMultiHasherTypes(f.Hashes())
+	if err != nil {
+		r.Fatalf("Failed to make new multi hasher: %v", err)
+	}
+	_, err = hash.Write([]byte(content))
+	if err != nil {
+		r.Fatalf("Failed to write to hash: %v", err)
+	}
+	hashSums := hash.Sums()
 	const maxTries = 10
 	for tries := 1; ; tries++ {
 		in := bytes.NewBufferString(content)
-		objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
+		objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, hashSums, nil)
 		_, err := put(ctx, in, objinfo)
 		if err == nil {
 			break