rclone/fs/chunksize/chunksize.go
Nick Craig-Wood 0501773db1 azureblob,b2,s3: fix chunksize calculations producing too many parts
Before this fix, the chunksize calculator was using the previous size
of the object, not the new size, to calculate the chunk size.

This meant that uploading a replacement object which needed a larger
chunk size would fail because it used too many parts.

This fix changes the calculator to take the new size explicitly.
2022-08-09 12:57:38 +01:00
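For illustration (these limits are typical S3-style values, not figures
taken from the commit itself): with a 10,000 part limit and a 5 MiB
default chunk size, the default only covers uploads up to roughly
48.8 GiB. Replacing a small object with a 100 GiB one needs an 11 MiB
chunk size (100 GiB / 10,000 parts, rounded up to the next MiB, giving
about 9,310 parts); if the calculator works from the old, smaller size
it keeps the 5 MiB default, and the upload would need 20,480 parts and
fail.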

// Package chunksize calculates a suitable chunk size for large uploads
package chunksize

import (
	"github.com/rclone/rclone/fs"
)

// Calculator calculates the minimum chunk size needed to fit within
// the maximum number of parts, rounded up to the nearest fs.Mebi.
//
// For most backends, (chunk_size) * (concurrent_upload_routines)
// memory will be required, so we want the smallest chunk size that
// still allows the upload to proceed. We round up to the nearest
// fs.Mebi on the assumption that some backends may only allow
// integer type parameters when specifying the chunk size.
//
// Returns the default chunk size if it is large enough to support
// the given file size; otherwise returns the smallest chunk size
// necessary to allow the upload to proceed.
func Calculator(o interface{}, size int64, maxParts int, defaultChunkSize fs.SizeSuffix) fs.SizeSuffix {
	// If streaming then use default chunk size
	if size < 0 {
		fs.Debugf(o, "Streaming upload with chunk_size %s allows uploads of up to %s and will fail only when that limit is reached.", defaultChunkSize, fs.SizeSuffix(maxParts)*defaultChunkSize)
		return defaultChunkSize
	}
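	// Use the default chunk size if the file fits into maxParts
	// chunks of that size.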
	fileSize := fs.SizeSuffix(size)
	requiredChunks := fileSize / defaultChunkSize
	if requiredChunks < fs.SizeSuffix(maxParts) || (requiredChunks == fs.SizeSuffix(maxParts) && fileSize%defaultChunkSize == 0) {
		return defaultChunkSize
	}
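	// Otherwise pick the smallest chunk size that fits the file into
	// maxParts parts, rounded up to a whole number of MiB.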
	minChunk := fileSize / fs.SizeSuffix(maxParts)
	remainder := minChunk % fs.Mebi
	if remainder != 0 {
		minChunk += fs.Mebi - remainder
	}
	if fileSize/minChunk == fs.SizeSuffix(maxParts) && fileSize%fs.SizeSuffix(maxParts) != 0 { // when right on the boundary, we need to add a MiB
		minChunk += fs.Mebi
	}

	fs.Debugf(o, "size: %v, parts: %v, default: %v, new: %v; default chunk size insufficient, returned new chunk size", fileSize, maxParts, defaultChunkSize, minChunk)
	return minChunk
}
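
As a rough sketch of how this function is used (the object name and the
limits below are made-up example values, not taken from any particular
rclone backend), a standalone call might look like this:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunksize"
)

func main() {
	// 100 GiB upload with a 10,000 part limit and a 5 MiB default:
	// 100 GiB / 5 MiB = 20,480 parts is too many, so the calculator
	// returns 11 MiB (100 GiB / 10,000, rounded up to a whole MiB),
	// which needs about 9,310 parts.
	size := int64(100) << 30
	chunk := chunksize.Calculator("example-object", size, 10000, 5*fs.Mebi)
	fmt.Println(chunk) // expected to print "11Mi"
}

Backends such as azureblob, b2 and s3 (named in the commit above) feed
the result into their multipart uploaders; the call here only
demonstrates the calculation itself.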