Mirror of https://github.com/rclone/rclone (synced 2025-03-24 07:04:23 +01:00)
s3: make reading metadata more reliable to work around eventual consistency problems
commit 0b51d6221a
parent 2f9f9afac2

Changed files:
s3/s3.go | 17
--- a/s3/s3.go
+++ b/s3/s3.go
@@ -421,13 +421,28 @@ func (o *FsObjectS3) Size() int64 {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
+// if we get a 404 error then we retry a few times for eventual
+// consistency reasons
+//
 // it also sets the info
 func (o *FsObjectS3) readMetaData() (err error) {
 	if o.meta != nil {
 		return nil
 	}
-	headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
+	var headers s3.Headers
+
+	// Try reading the metadata a few times (with exponential
+	// backoff) to get around eventual consistency on 404 error
+	for tries := uint(0); tries < 10; tries++ {
+		headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
+		if s3Err, ok := err.(*s3.Error); ok {
+			if s3Err.StatusCode == http.StatusNotFound {
+				time.Sleep(5 * time.Millisecond << tries)
+				continue
+			}
+		}
+		break
+	}
 	if err != nil {
 		fs.Debug(o, "Failed to read info: %s", err)
 		return err
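The loop above retries only when the HEAD request fails with a 404 (http.StatusNotFound) reported via the *s3.Error type assertion; any other error, or a successful response, breaks out immediately. The sleep starts at 5ms and doubles on each attempt. As a standalone illustration (not part of the commit), the sketch below prints the backoff schedule this expression produces and its worst-case total wait:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Reproduce the backoff schedule from the patch:
	// 5ms << tries for tries = 0..9, i.e. 5ms, 10ms, 20ms, ... 2.56s.
	var total time.Duration
	for tries := uint(0); tries < 10; tries++ {
		sleep := 5 * time.Millisecond << tries
		total += sleep
		fmt.Printf("attempt %2d: sleep %v\n", tries+1, sleep)
	}
	// Worst case (object never appears): about 5.115s of waiting in total.
	fmt.Printf("total worst-case wait: %v\n", total)
}

Because the final break leaves err holding whatever the last Head call returned, an object that never appears still surfaces as the original 404 once the ten attempts are exhausted.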