
amazonclouddrive: Restart directory listings on error - fixes #475

Before this change rclone would retry only the page that failed during
the directory listing.  However it turns out that on 429 errors at
least, that page is then dropped from the directory listing, which
results in missing files in the list.  The workaround for this is to
restart the directory listing from the beginning on any retryable error.
Nick Craig-Wood 2016-05-14 17:15:42 +01:00
parent ac9c20b048
commit 536526cc92
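
The pattern described in the commit message is worth seeing on its own:
rather than retrying just the page that failed, throw away the partial
results and list again from the first page, up to a bounded number of
attempts. Below is a minimal self-contained sketch of that restart loop;
fetchPage, errRateLimited and the page-of-strings shape are invented for
the example and are not rclone APIs.

package main

import (
	"errors"
	"fmt"
)

// errRateLimited stands in for a retryable server error such as a 429.
var errRateLimited = errors.New("429 too many requests")

// listAll pages through a listing via fetchPage. On a retryable error it
// restarts from page 0, discarding partial results, so a page dropped by
// the server cannot leave a hole in the final list.
func listAll(fetchPage func(page int) ([]string, error), maxTries int) ([]string, error) {
	var lastErr error
	for try := 1; try <= maxTries; try++ {
		var out []string
		page := 0
		for {
			items, err := fetchPage(page)
			if err != nil {
				lastErr = err
				break // abandon this pass over the listing
			}
			if items == nil {
				return out, nil // listing finished cleanly
			}
			out = append(out, items...)
			page++
		}
		if !errors.Is(lastErr, errRateLimited) {
			return nil, lastErr // not retryable - give up
		}
		fmt.Printf("listing failed: %v - restarting (try %d/%d)\n", lastErr, try, maxTries)
	}
	return nil, lastErr
}

func main() {
	calls := 0
	// A fetcher that rate-limits its second request, then serves two pages.
	fetch := func(page int) ([]string, error) {
		calls++
		if calls == 2 {
			return nil, errRateLimited
		}
		switch page {
		case 0:
			return []string{"a", "b"}, nil
		case 1:
			return []string{"c"}, nil
		}
		return nil, nil
	}
	files, err := listAll(fetch, 3)
	fmt.Println(files, err) // [a b c] <nil>
}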

amazonclouddrive/amazonclouddrive.go

@@ -308,11 +308,11 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 		Filters: query,
 	}
 	var nodes []*acd.Node
+	var out []*acd.Node
 	//var resp *http.Response
-OUTER:
 	for {
 		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
+		err = f.pacer.CallNoRetry(func() (bool, error) {
 			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
 			return shouldRetry(resp, err)
 		})
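
The change from f.pacer.Call to f.pacer.CallNoRetry in this hunk is what
moves the retry decision up a level: Call retries the single GetNodes
page in place, while CallNoRetry paces the request, runs it once, and
reports the failure as retryable so the caller can restart a larger unit
of work (ListDir below checks this with fs.IsRetryError). A rough sketch
of that split, using a hypothetical pacer rather than rclone's real
fs.Pacer:

package main

import (
	"errors"
	"fmt"
)

// retryError marks an error that the caller may retry at a higher level.
type retryError struct{ err error }

func (e retryError) Error() string { return e.err.Error() }

// isRetryError plays the role of fs.IsRetryError in the diff above.
func isRetryError(err error) bool {
	var r retryError
	return errors.As(err, &r)
}

// call retries fn in place until it stops asking for a retry or the
// attempts run out, hiding the retries from the caller.
func call(fn func() (bool, error), tries int) error {
	var err error
	var again bool
	for i := 0; i < tries; i++ {
		again, err = fn()
		if !again {
			return err
		}
	}
	return err
}

// callNoRetry runs fn exactly once; if fn wanted a retry, the error is
// wrapped so the caller can restart a larger unit of work instead.
func callNoRetry(fn func() (bool, error)) error {
	again, err := fn()
	if again && err != nil {
		return retryError{err}
	}
	return err
}

func main() {
	flaky := func() (bool, error) { return true, errors.New("429") }
	fmt.Println(isRetryError(call(flaky, 3)))     // false: retried in place
	fmt.Println(isRetryError(callNoRetry(flaky))) // true: caller decides
}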
@@ -328,48 +328,64 @@ OUTER:
 				if *node.Status != statusAvailable {
 					continue
 				}
-				if fn(node) {
-					found = true
-					break OUTER
-				}
+				// Store the nodes up in case we have to retry the listing
+				out = append(out, node)
 			}
 		}
 	}
+	// Send the nodes now
+	for _, node := range out {
+		if fn(node) {
+			found = true
+			break
+		}
+	}
 	return
 }
 
 // ListDir reads the directory specified by the job into out, returning any more jobs
 func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
 	fs.Debug(f, "Reading %q", job.Path)
-	_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
-		remote := job.Path + *node.Name
-		switch *node.Kind {
-		case folderKind:
-			if out.IncludeDirectory(remote) {
-				dir := &fs.Dir{
-					Name:  remote,
-					Bytes: -1,
-					Count: -1,
-				}
-				dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
-				if out.AddDir(dir) {
-					return true
-				}
-				if job.Depth > 0 {
-					jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
-				}
-			}
-		case fileKind:
-			if o := f.newFsObjectWithInfo(remote, node); o != nil {
-				if out.Add(o) {
-					return true
-				}
-			}
-		default:
-			// ignore ASSET etc
-		}
-		return false
-	})
+	maxTries := fs.Config.LowLevelRetries
+	for tries := 1; tries <= maxTries; tries++ {
+		_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
+			remote := job.Path + *node.Name
+			switch *node.Kind {
+			case folderKind:
+				if out.IncludeDirectory(remote) {
+					dir := &fs.Dir{
+						Name:  remote,
+						Bytes: -1,
+						Count: -1,
+					}
+					dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
+					if out.AddDir(dir) {
+						return true
+					}
+					if job.Depth > 0 {
+						jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
+					}
+				}
+			case fileKind:
+				if o := f.newFsObjectWithInfo(remote, node); o != nil {
+					if out.Add(o) {
+						return true
+					}
+				}
+			default:
+				// ignore ASSET etc
+			}
+			return false
+		})
+		if fs.IsRetryError(err) {
+			fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		break
+	}
 	fs.Debug(f, "Finished reading %q", job.Path)
 	return jobs, err
 }
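
One subtlety in the reworked listAll is that the fn callback no longer
runs while paging: nodes are buffered in out, and only a listing that
ran to completion hands them to the callback. That is what makes the
restart safe: an aborted pass produces no output at all rather than a
prefix of the directory, so a retried listing cannot deliver duplicates.
A toy contrast of the two delivery strategies (all names invented):

package main

import "fmt"

// attempts simulates two passes over a paginated listing: the first
// loses its second page to a 429 and is abandoned, the second succeeds.
var attempts = [][][]string{
	{{"a", "b"}},        // attempt 1: truncated by a 429
	{{"a", "b"}, {"c"}}, // attempt 2: complete
}

func main() {
	// Per-page delivery: items from the failed attempt leak out, so a
	// retry shows "a" and "b" twice.
	fmt.Println("per-page delivery:")
	for _, attempt := range attempts {
		for _, page := range attempt {
			for _, item := range page {
				fmt.Println("  seen:", item)
			}
		}
	}

	// Buffer-then-deliver, as listAll does with out: a pass only
	// delivers if it completed, so every item is seen exactly once.
	fmt.Println("buffered delivery:")
	var out []string
	for _, page := range attempts[1] { // only the completed attempt
		out = append(out, page...)
	}
	for _, item := range out {
		fmt.Println("  seen:", item)
	}
}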