
b2: fix server side copies greater than 4GB

After the multipart chunker refactor, the multipart chunked server
side copy was accidentally sending one part too many. The last part
was 0 length, which b2 rejected.

This was caused by a simple off-by-one error in the refactoring,
where we went from 1-based to 0-based part number counting.
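
To make the failure mode concrete, here is a minimal standalone
sketch (not rclone's actual code; sizes and names are illustrative)
of what keeping the old 1-based bound in the new 0-based loop does:

    package main

    import "fmt"

    func main() {
        const size = int64(10 << 20)     // 10 MiB source object
        const chunkSize = int64(4 << 20) // 4 MiB copy cutoff
        parts := size / chunkSize
        if size%chunkSize != 0 {
            parts++ // parts == 3: 4 MiB + 4 MiB + 2 MiB
        }
        remaining := size
        // Buggy: 0-based part numbers with the old 1-based bound (<=)
        // run the loop parts+1 times.
        for part := int64(0); part <= parts; part++ {
            reqSize := chunkSize
            if remaining < chunkSize {
                reqSize = remaining
            }
            fmt.Printf("part %d: %d bytes\n", part, reqSize)
            remaining -= reqSize
        }
        // The extra iteration (part 3) requests 0 bytes, an empty
        // byte range which b2 rejects. The fix is the 0-based
        // bound: part < parts.
    }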

Fixing this revealed that the metadata wasn't being re-read for the
copied object either, leaving the destination object with stale file
info after a chunked copy.

This fixes both of those issues and adds an integration test so they
won't happen again.

See: https://forum.rclone.org/t/large-server-side-copy-in-b2-fails-due-to-bad-byte-range/42294
Nick Craig-Wood 2023-10-12 11:15:42 +01:00
parent 5afa838457
commit d8d76ff647
3 changed files with 60 additions and 2 deletions

backend/b2/b2.go

@@ -1332,7 +1332,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 		if err != nil {
 			return err
 		}
-		return up.Copy(ctx)
+		err = up.Copy(ctx)
+		if err != nil {
+			return err
+		}
+		return dstObj.decodeMetaDataFileInfo(up.info)
 	}
 	dstBucket, dstPath := dstObj.split()
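
(Returning up.Copy's result directly meant the copy path never read
back the file info which b2 returns when the large copy completes;
decoding up.info into dstObj on success is what fixes the stale
metadata.)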

backend/b2/b2_internal_test.go

@@ -1,10 +1,15 @@
 package b2
 
 import (
+	"context"
 	"testing"
 	"time"
 
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // Test b2 string encoding
@@ -168,3 +173,52 @@ func TestParseTimeString(t *testing.T) {
 	}
 }
+
+// The integration tests do a reasonable job of testing the normal
+// copy but don't test the chunked copy.
+func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
+	ctx := context.Background()
+
+	contents := random.String(8 * 1024 * 1024)
+
+	item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+	src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+	defer func() {
+		assert.NoError(t, src.Remove(ctx))
+	}()
+
+	var itemCopy = item
+	itemCopy.Path += ".copy"
+
+	// Set copy cutoff to minimum value so we make chunks
+	origCutoff := f.opt.CopyCutoff
+	f.opt.CopyCutoff = 5 * 1024 * 1024
+	defer func() {
+		f.opt.CopyCutoff = origCutoff
+	}()
+
+	// Do the copy
+	dst, err := f.Copy(ctx, src, itemCopy.Path)
+	require.NoError(t, err)
+	defer func() {
+		assert.NoError(t, dst.Remove(ctx))
+	}()
+
+	// Check size
+	assert.Equal(t, src.Size(), dst.Size())
+
+	// Check modtime
+	srcModTime := src.ModTime(ctx)
+	dstModTime := dst.ModTime(ctx)
+	assert.True(t, srcModTime.Equal(dstModTime))
+
+	// Make sure contents are correct
+	gotContents := fstests.ReadObject(ctx, t, dst, -1)
+	assert.Equal(t, contents, gotContents)
+}
+
+// -run TestIntegration/FsMkdir/FsPutFiles/Internal
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
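
For reference, internal tests like this one run as part of the
integration test suite against a real remote, along these lines
(assuming a TestB2: remote is configured; the exact invocation may
vary):

    go test -v ./backend/b2 -remote TestB2: -run 'TestIntegration/FsMkdir/FsPutFiles/Internal'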

backend/b2/upload.go

@@ -456,7 +456,7 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
 		remaining = up.size
 	)
 	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := 0; part <= up.parts; part++ {
+	for part := 0; part < up.parts; part++ {
 		// Fail fast, in case an errgroup managed function returns an error
 		// gCtx is cancelled. There is no point in copying all the other parts.
 		if gCtx.Err() != nil {
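
With the corrected bound the loop issues exactly up.parts
b2_copy_part requests, so the final request carries the last partial
chunk instead of an empty byte range.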