From 8ffe3e462cbf5688c37c54009db09d8dcb486860 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood <nick@craig-wood.com>
Date: Wed, 27 Sep 2023 17:48:40 +0100
Subject: [PATCH] webdav: nextcloud: implement backoff and retry for 423
 LOCKED errors

When uploading chunked files to nextcloud, it gives a 423 error while
it is merging files.

This waits for an exponentially increasing amount of time for it to
clear.

If after we have received a 423 error we receive a 404 error then we
assume all is good as this is what appears to happen in practice.

Fixes #7109
---
 backend/webdav/chunking.go | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/backend/webdav/chunking.go b/backend/webdav/chunking.go
index 4cea79838..379079cf9 100644
--- a/backend/webdav/chunking.go
+++ b/backend/webdav/chunking.go
@@ -14,21 +14,30 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 )
 
-func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
 	// Not found. Can be returned by NextCloud when merging chunks of an upload.
 	if resp != nil && resp.StatusCode == 404 {
+		if *wasLocked {
+			// Assume a 404 error after we've received a 423 error is actually a success
+			return false, nil
+		}
 		return true, err
 	}
 
 	// 423 LOCKED
 	if resp != nil && resp.StatusCode == 423 {
-		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
+		*wasLocked = true
+		fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
+		time.Sleep(*sleepTime)
+		*sleepTime *= 2
+		return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
 	}
 
 	return f.shouldRetry(ctx, resp, err)
@@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
 	}
 	opts.ExtraHeaders = o.extraHeaders(ctx, src)
 	opts.ExtraHeaders["Destination"] = destinationURL.String()
+	sleepTime := 5 * time.Second
+	wasLocked := false
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
+		return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
 	})
 	if err != nil {
 		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)