1
mirror of https://github.com/rclone/rclone synced 2025-01-03 03:46:24 +01:00

s3: list an unlimited number of items - fixes #22

This commit is contained in:
Nick Craig-Wood 2015-02-10 17:58:29 +00:00
parent 8d1c0ad07c
commit 0faed16899

View File

@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"log"
"mime"
"net/http"
"path"
@ -102,6 +103,7 @@ func init() {
// Constants used by the S3 backend.
const (
metaMtime = "X-Amz-Meta-Mtime" // S3 object-metadata key under which the file's modification time is stored
listChunkSize = 1024 // max items requested per bucket List call when paging through results
// NOTE(review): S3 typically caps list responses at 1000 keys per request regardless of
// the value asked for — confirm against the ListObjects API before relying on 1024.
)
// FsS3 represents a remote s3 server
@ -267,8 +269,9 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
if directories {
delimiter = "/"
}
// FIXME need to implement ALL loop
objects, err := f.b.List(f.root, delimiter, "", 10000)
marker := ""
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
@ -298,6 +301,16 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
}
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
}
log.Printf("retry with marker = %q", marker)
}
}
// Walk the path returning a channel of FsObjects