
vfs: add --vfs-cache-min-free-space to control minimum free space on the disk containing the cache

See: https://forum.rclone.org/t/rclone-fails-to-control-disk-usage-and-its-filling-the-disk-to-100/41494/
Nick Craig-Wood 2023-09-05 12:38:19 +01:00
parent 39f910a65d
commit 63ab250817
5 changed files with 109 additions and 35 deletions

View File

@@ -88,6 +88,7 @@ find that you need one or the other or both.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -106,14 +107,15 @@ seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.
If using !--vfs-cache-max-size! note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
!--vfs-cache-poll-interval!. Secondly because open files cannot be
evicted from the cache. When !--vfs-cache-max-size!
is exceeded, rclone will attempt to evict the least accessed files
from the cache first. rclone will start with files that haven't
been accessed for the longest. This cache flushing strategy is
efficient and more relevant files are likely to remain cached.
If using !--vfs-cache-max-size! or !--vfs-cache-min-free-space! note
that the cache may exceed these quotas for two reasons. Firstly
because they are only checked every !--vfs-cache-poll-interval!, and
secondly because open files cannot be evicted from the cache. When
the cache grows over !--vfs-cache-max-size! or the free space on the
disk containing the cache drops below !--vfs-cache-min-free-space!,
rclone will attempt to evict the least accessed files from the cache
first, starting with the files that have not been accessed for the
longest. This cache flushing strategy is efficient and more relevant
files are likely to remain cached.
The !--vfs-cache-max-age! will evict files from the cache
after the set time since last access has passed. The default value of
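As an illustrative combined use of the two quotas (the remote name and paths are placeholders), a mount could be started with `rclone mount remote: /mnt/remote --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-cache-min-free-space 2G`, which trims the cache whenever it grows past 10 GiB or the disk holding the cache drops below 2 GiB free, subject to the poll-interval and open-file caveats described above.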

View File

@@ -22,6 +22,7 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/diskusage"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/vfs/vfscache/writeback"
@@ -607,16 +608,17 @@ func (c *Cache) retryFailedResets() {
}
}
func (c *Cache) purgeClean(quota int64) {
// Remove cache files that are not dirty until the quotas are satisfied
func (c *Cache) purgeClean() {
c.mu.Lock()
defer c.mu.Unlock()
var items Items
if quota <= 0 || c.used < quota {
if c.quotasOK() {
return
}
var items Items
// Make a slice of clean cache files
for _, item := range c.item {
if !item.IsDirty() {
@@ -628,7 +630,7 @@ func (c *Cache) purgeClean(quota int64) {
// Reset items until the quota is OK
for _, item := range items {
if c.used < quota {
if c.quotasOK() {
break
}
resetResult, spaceFreed, err := item.Reset()
@@ -661,7 +663,7 @@ func (c *Cache) purgeOld(maxAge time.Duration) {
for _, item := range c.item {
c.removeNotInUse(item, maxAge, false)
}
if c.used < int64(c.opt.CacheMaxSize) {
if c.quotasOK() {
c.outOfSpace = false
c.cond.Broadcast()
}
@@ -693,16 +695,53 @@ func (c *Cache) updateUsed() (used int64) {
return newUsed
}
// Check that the available space on the disk containing the cache is within limits.
func (c *Cache) minFreeSpaceQuotaOK() bool {
if c.opt.CacheMinFreeSpace <= 0 {
return true
}
du, err := diskusage.New(config.GetCacheDir())
if err == diskusage.ErrUnsupported {
return true
}
if err != nil {
fs.Errorf(nil, "disk usage returned error: %v", err)
return true
}
return du.Available >= uint64(c.opt.CacheMinFreeSpace)
}
// Check that the cache used size is within the max size quota.
//
// must be called with mu held.
func (c *Cache) maxSizeQuotaOK() bool {
if c.opt.CacheMaxSize <= 0 {
return true
}
return c.used <= int64(c.opt.CacheMaxSize)
}
// Check that all the configured quotas are within limits.
//
// must be called with mu held.
func (c *Cache) quotasOK() bool {
return c.maxSizeQuotaOK() && c.minFreeSpaceQuotaOK()
}
// Return true if any quotas are set
func (c *Cache) haveQuotas() bool {
return c.opt.CacheMaxSize > 0 || c.opt.CacheMinFreeSpace > 0
}
// Remove clean cache files that are not open until the total space
// is reduced below quota starting from the oldest first
func (c *Cache) purgeOverQuota(quota int64) {
func (c *Cache) purgeOverQuota() {
c.updateUsed()
c.mu.Lock()
defer c.mu.Unlock()
if quota <= 0 || c.used < quota {
if c.quotasOK() {
return
}
@@ -719,9 +758,9 @@ func (c *Cache) purgeOverQuota(quota int64) {
// Remove items until the quota is OK
for _, item := range items {
c.removeNotInUse(item, 0, c.used <= quota)
c.removeNotInUse(item, 0, c.quotasOK())
}
if c.used < quota {
if c.quotasOK() {
c.outOfSpace = false
c.cond.Broadcast()
}
@@ -743,12 +782,12 @@ func (c *Cache) clean(kicked bool) {
c.purgeOld(c.opt.CacheMaxAge)
// If have a maximum cache size...
if int64(c.opt.CacheMaxSize) > 0 {
if c.haveQuotas() {
// Remove files not in use until cache size is below quota starting from the oldest first
c.purgeOverQuota(int64(c.opt.CacheMaxSize))
c.purgeOverQuota()
// Remove cache files that are not dirty if we are still above the max cache size
c.purgeClean(int64(c.opt.CacheMaxSize))
c.purgeClean()
c.retryFailedResets()
}
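The only new import in cache.go is lib/diskusage. Below is a minimal, self-contained sketch (not the committed code) of the free-space gate it enables, under the same assumptions as minFreeSpaceQuotaOK above: unsupported platforms and errors count as "quota OK", and the 2 GiB threshold is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/diskusage"
)

// freeSpaceOK reports whether the disk containing dir has at least minFree
// bytes available. Unsupported platforms and errors are treated as OK,
// mirroring minFreeSpaceQuotaOK in the diff above.
func freeSpaceOK(dir string, minFree fs.SizeSuffix) bool {
	if minFree <= 0 {
		return true // quota disabled
	}
	du, err := diskusage.New(dir)
	if err == diskusage.ErrUnsupported {
		return true // free space cannot be measured on this platform
	}
	if err != nil {
		fmt.Printf("disk usage returned error: %v\n", err)
		return true
	}
	return du.Available >= uint64(minFree)
}

func main() {
	// Check the rclone cache directory against an illustrative 2 GiB floor.
	fmt.Println("min free space quota OK:", freeSpaceOK(config.GetCacheDir(), 2*fs.Gibi))
}
```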

View File

@@ -10,7 +10,10 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local" // import the local backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/diskusage"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -354,7 +357,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
}, itemAsString(c))
// Check nothing removed
c.purgeOverQuota(1)
c.opt.CacheMaxSize = 1
c.purgeOverQuota()
// Close the files
require.NoError(t, potato.Close(nil))
@@ -373,7 +377,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
potato2.info.ATime = t1
// Check only potato removed to get below quota
c.purgeOverQuota(10)
c.opt.CacheMaxSize = 10
c.purgeOverQuota()
assert.Equal(t, int64(6), c.used)
assert.Equal(t, []string{
@@ -399,7 +404,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
potato.info.ATime = t2
// Check only potato2 removed to get below quota
c.purgeOverQuota(10)
c.opt.CacheMaxSize = 10
c.purgeOverQuota()
assert.Equal(t, int64(5), c.used)
c.purgeEmptyDirs("", true)
@@ -408,7 +414,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
}, itemAsString(c))
// Now purge everything
c.purgeOverQuota(1)
c.opt.CacheMaxSize = 1
c.purgeOverQuota()
assert.Equal(t, int64(0), c.used)
c.purgeEmptyDirs("", true)
@@ -420,6 +427,26 @@ func TestCachePurgeOverQuota(t *testing.T) {
assert.Equal(t, []string(nil), itemAsString(c))
}
func TestCachePurgeMinFreeSpace(t *testing.T) {
du, err := diskusage.New(config.GetCacheDir())
if err == diskusage.ErrUnsupported {
t.Skip(err)
}
// We've tested the quota mechanism already, so just test that the
// min free space quota is working.
_, c := newTestCache(t)
// First set free space quota very small and check it is OK
c.opt.CacheMinFreeSpace = 1
assert.True(t, c.minFreeSpaceQuotaOK())
assert.True(t, c.quotasOK())
// Now set it a bit larger than the space currently available on the disk and check it is BAD
c.opt.CacheMinFreeSpace = fs.SizeSuffix(du.Available) + fs.Gibi
assert.False(t, c.minFreeSpaceQuotaOK())
assert.False(t, c.quotasOK())
}
// test reset clean files
func TestCachePurgeClean(t *testing.T) {
r, c := newItemTestCache(t)
@@ -453,7 +480,8 @@ func TestCachePurgeClean(t *testing.T) {
require.NoError(t, potato3.Truncate(6))
c.updateUsed()
c.purgeClean(1)
c.opt.CacheMaxSize = 1
c.purgeClean()
assert.Equal(t, []string{
`name="existing" opens=2 size=100 space=0`,
`name="sub/dir/potato2" opens=1 size=5 space=5`,
@@ -462,7 +490,8 @@ func TestCachePurgeClean(t *testing.T) {
assert.Equal(t, int64(11), c.used)
require.NoError(t, potato2.Close(nil))
c.purgeClean(1)
c.opt.CacheMaxSize = 1
c.purgeClean()
assert.Equal(t, []string{
`name="existing" opens=2 size=100 space=0`,
`name="sub/dir/potato3" opens=1 size=6 space=6`,
@@ -476,7 +505,8 @@ func TestCachePurgeClean(t *testing.T) {
// Remove all files now. They are all not in use.
// purgeClean does not remove empty cache files. purgeOverQuota does.
// So we use purgeOverQuota here for the cleanup.
c.purgeOverQuota(1)
c.opt.CacheMaxSize = 1
c.purgeOverQuota()
c.purgeEmptyDirs("", true)
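With the platform guard in place, just the new behaviour can be exercised with something like `go test ./vfs/vfscache -run TestCachePurgeMinFreeSpace -v` from the repository root (the exact invocation is a suggestion); on platforms where lib/diskusage returns ErrUnsupported the test skips itself, as shown above.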

View File

@@ -26,6 +26,7 @@ type Options struct {
CacheMode CacheMode
CacheMaxAge time.Duration
CacheMaxSize fs.SizeSuffix
CacheMinFreeSpace fs.SizeSuffix
CachePollInterval time.Duration
CaseInsensitive bool
WriteWait time.Duration // time to wait for in-sequence write
@@ -56,6 +57,7 @@ var DefaultOpt = Options{
ChunkSize: 128 * fs.Mebi,
ChunkSizeLimit: -1,
CacheMaxSize: -1,
CacheMinFreeSpace: -1,
CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
WriteWait: 1000 * time.Millisecond,
ReadWait: 20 * time.Millisecond,
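For code that embeds the VFS rather than using the command line flags, the new field sits alongside the existing quota in vfscommon.Options. A hedged sketch of configuring both quotas directly (the sizes are illustrative, and passing the options on to the VFS constructor is not shown):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs/vfscommon"
)

func main() {
	// Start from the library defaults, where both quotas are off (-1),
	// and enable them explicitly.
	opt := vfscommon.DefaultOpt
	opt.CacheMode = vfscommon.CacheModeFull
	opt.CacheMaxSize = 10 * fs.Gibi     // trim the cache above 10 GiB used
	opt.CacheMinFreeSpace = 2 * fs.Gibi // or when the cache disk has under 2 GiB free
	fmt.Println("max size:", opt.CacheMaxSize, "min free space:", opt.CacheMinFreeSpace)
}
```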

View File

@@ -28,6 +28,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects", "VFS")
flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max time since last access of objects in the cache", "VFS")
flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache", "VFS")
flags.FVarP(flagSet, &Opt.CacheMinFreeSpace, "vfs-cache-min-free-space", "", "Target minimum free space on the disk containing the cache", "VFS")
flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks", "VFS")
flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited)", "VFS")
flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions", "VFS")