Mirror of https://github.com/rclone/rclone (synced 2024-12-30 23:46:23 +01:00)
cache: plex integration, refactor chunk storage and worker retries (#1899)
This commit is contained in:
commit b48b537325
parent b05e472d2e
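Summary of the user-visible changes, as read from the diffs below: the warm-up mechanism (the chunk_age and warmup_age options and the --cache-warm-up-age / --cache-warm-up-rps flags) is removed; chunks are instead kept until the store exceeds chunk_total_size / --cache-total-chunk-size (default 10G) and are cleaned every --cache-chunk-clean-interval (default 1m); --cache-read-retries now defaults to 10; and new optional plex_url, plex_username and plex_password settings let the backend scale its download workers based on Plex playback activity. NewFs also rejects a total chunk size smaller than the chunk size multiplied by the worker count. A minimal Go sketch of that check with the new defaults (illustrative only, not an excerpt of the rclone source):

	package main

	import "fmt"

	// Illustrative sketch of the new NewFs validation using the default values
	// defined in cache.go below; not a literal excerpt of the rclone code.
	func main() {
		chunkSize := int64(5 << 20)       // DefCacheChunkSize "5M"
		workers := int64(4)               // DefCacheTotalWorkers
		chunkTotalSize := int64(10 << 30) // DefCacheTotalChunkSize "10G"
		if chunkTotalSize < chunkSize*workers {
			fmt.Println("cache-total-chunk-size must be at least cache-chunk-size * cache-workers")
			return
		}
		fmt.Printf("ok: minimum allowed total chunk size is %d bytes\n", chunkSize*workers)
	}

With the defaults this lower bound is 20 MiB, comfortably below the 10G default total.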
cache/cache.go (vendored, 326 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7
 
 package cache
 
@@ -7,7 +7,6 @@ import (
 	"io"
 	"path"
 	"path/filepath"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -17,6 +16,7 @@ import (
 	"os/signal"
 	"syscall"
 
+	"github.com/ncw/rclone/crypt"
 	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
@@ -26,22 +26,20 @@ import (
 const (
 	// DefCacheChunkSize is the default value for chunk size
 	DefCacheChunkSize = "5M"
+	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
+	DefCacheTotalChunkSize = "10G"
+	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
+	DefCacheChunkCleanInterval = "1m"
 	// DefCacheInfoAge is the default value for object info age
 	DefCacheInfoAge = "6h"
-	// DefCacheChunkAge is the default value for chunk age duration
-	DefCacheChunkAge = "3h"
-	// DefCacheMetaAge is the default value for chunk age duration
-	DefCacheMetaAge = "3h"
 	// DefCacheReadRetries is the default value for read retries
-	DefCacheReadRetries = 3
+	DefCacheReadRetries = 10
 	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
 	DefCacheTotalWorkers = 4
 	// DefCacheChunkNoMemory will enable or disable in-memory storage for chunks
 	DefCacheChunkNoMemory = false
 	// DefCacheRps limits the number of requests per second to the source FS
 	DefCacheRps = -1
-	// DefWarmUpRatePerSeconds will apply a special config for warming up the cache
-	DefWarmUpRatePerSeconds = "3/20"
 	// DefCacheWrites will cache file data on writes through the cache
 	DefCacheWrites = false
 )
@@ -49,18 +47,17 @@ const (
 // Globals
 var (
 	// Flags
 	cacheDbPath = fs.StringP("cache-db-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cache DB")
 	cacheDbPurge = fs.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
 	cacheChunkSize = fs.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
-	cacheInfoAge = fs.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
-	cacheChunkAge = fs.StringP("cache-chunk-age", "", DefCacheChunkAge, "How much time should a chunk be in cache before cleanup")
-	cacheMetaAge = fs.StringP("cache-warm-up-age", "", DefCacheMetaAge, "How much time should data be cached during warm up")
+	cacheTotalChunkSize = fs.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
+	cacheChunkCleanInterval = fs.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
+	cacheInfoAge = fs.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
 	cacheReadRetries = fs.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
 	cacheTotalWorkers = fs.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
 	cacheChunkNoMemory = fs.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
 	cacheRps = fs.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
-	cacheWarmUp = fs.StringP("cache-warm-up-rps", "", DefWarmUpRatePerSeconds, "Format is X/Y = how many X opens per Y seconds should trigger the warm up mode. See the docs")
-	cacheStoreWrites = fs.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
+	cacheStoreWrites = fs.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
 )
 
 // Register with Fs
@@ -72,6 +69,19 @@ func init() {
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+		}, {
+			Name:     "plex_url",
+			Help:     "Optional: The URL of the Plex server",
+			Optional: true,
+		}, {
+			Name:     "plex_username",
+			Help:     "Optional: The username of the Plex user",
+			Optional: true,
+		}, {
+			Name:       "plex_password",
+			Help:       "Optional: The password of the Plex user",
+			IsPassword: true,
+			Optional:   true,
 		}, {
 			Name: "chunk_size",
 			Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading. \nDefault: " + DefCacheChunkSize,
@@ -105,34 +115,18 @@ func init() {
 			},
 			Optional: true,
 		}, {
-			Name: "chunk_age",
-			Help: "How much time should a chunk (file data) be stored in cache. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheChunkAge,
+			Name: "chunk_total_size",
+			Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted. \nDefault: " + DefCacheTotalChunkSize,
 			Examples: []fs.OptionExample{
 				{
-					Value: "30s",
-					Help:  "30 seconds",
+					Value: "500M",
+					Help:  "500 MB",
 				}, {
-					Value: "1m",
-					Help:  "1 minute",
+					Value: "1G",
+					Help:  "1 GB",
 				}, {
-					Value: "1h30m",
-					Help:  "1 hour and 30 minutes",
-				},
-			},
-			Optional: true,
-		}, {
-			Name: "warmup_age",
-			Help: "How much time should data be cached during warm up. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheMetaAge,
-			Examples: []fs.OptionExample{
-				{
-					Value: "3h",
-					Help:  "3 hours",
-				}, {
-					Value: "6h",
-					Help:  "6 hours",
-				}, {
-					Value: "24h",
-					Help:  "24 hours",
+					Value: "10G",
+					Help:  "10 GB",
 				},
 			},
 			Optional: true,
@@ -149,10 +143,7 @@ type ChunkStorage interface {
 	GetChunk(cachedObject *Object, offset int64) ([]byte, error)
 
 	// add a new chunk
-	AddChunk(cachedObject *Object, data []byte, offset int64) error
-
-	// AddChunkAhead adds a new chunk before caching an Object for it
-	AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error
+	AddChunk(fp string, data []byte, offset int64) error
 
 	// if the storage can cleanup on a cron basis
 	// otherwise it can do a noop operation
@@ -161,6 +152,10 @@ type ChunkStorage interface {
 	// if the storage can cleanup chunks after we no longer need them
 	// otherwise it can do a noop operation
 	CleanChunksByNeed(offset int64)
+
+	// if the storage can cleanup chunks after the total size passes a certain point
+	// otherwise it can do a noop operation
+	CleanChunksBySize(maxSize int64)
 }
 
 // Storage is a storage type (Bolt) which needs to support both chunk and file based operations
@@ -213,26 +208,21 @@ type Fs struct {
 	features *fs.Features // optional features
 	cache    Storage
 
 	fileAge            time.Duration
 	chunkSize          int64
-	chunkAge             time.Duration
-	metaAge              time.Duration
+	chunkTotalSize     int64
+	chunkCleanInterval time.Duration
 	readRetries        int
 	totalWorkers       int
-	chunkMemory          bool
-	warmUp               bool
-	warmUpRate           int
-	warmUpSec            int
-	cacheWrites          bool
-	originalTotalWorkers int
-	originalChunkMemory  bool
+	totalMaxWorkers    int
+	chunkMemory        bool
+	cacheWrites        bool
 
 	lastChunkCleanup  time.Time
 	lastRootCleanup   time.Time
-	lastOpenedEntries map[string]time.Time
 	cleanupMu         sync.Mutex
-	warmupMu          sync.Mutex
 	rateLimiter       *rate.Limiter
+	plexConnector     *plexConnector
 }
 
 // NewFs contstructs an Fs from the path, container:path
@@ -245,12 +235,13 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	// Look for a file first
 	remotePath := path.Join(remote, rpath)
 	wrappedFs, wrapErr := fs.NewFs(remotePath)
-
 	if wrapErr != fs.ErrorIsFile && wrapErr != nil {
 		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 	}
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
 
+	plexURL := fs.ConfigFileGet(name, "plex_url")
+	plexToken := fs.ConfigFileGet(name, "plex_token")
 	var chunkSize fs.SizeSuffix
 	chunkSizeString := fs.ConfigFileGet(name, "chunk_size", DefCacheChunkSize)
 	if *cacheChunkSize != DefCacheChunkSize {
@@ -260,6 +251,20 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to understand chunk size", chunkSizeString)
 	}
+	var chunkTotalSize fs.SizeSuffix
+	chunkTotalSizeString := fs.ConfigFileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
+	if *cacheTotalChunkSize != DefCacheTotalChunkSize {
+		chunkTotalSizeString = *cacheTotalChunkSize
+	}
+	err = chunkTotalSize.Set(chunkTotalSizeString)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to understand chunk total size", chunkTotalSizeString)
+	}
+	chunkCleanIntervalStr := *cacheChunkCleanInterval
+	chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
+	}
 	infoAge := fs.ConfigFileGet(name, "info_age", DefCacheInfoAge)
 	if *cacheInfoAge != DefCacheInfoAge {
 		infoAge = *cacheInfoAge
@@ -268,58 +273,70 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to understand duration", infoAge)
 	}
-	chunkAge := fs.ConfigFileGet(name, "chunk_age", DefCacheChunkAge)
-	if *cacheChunkAge != DefCacheChunkAge {
-		chunkAge = *cacheChunkAge
-	}
-	chunkDuration, err := time.ParseDuration(chunkAge)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration", chunkAge)
-	}
-	metaAge := fs.ConfigFileGet(name, "warmup_age", DefCacheMetaAge)
-	if *cacheMetaAge != DefCacheMetaAge {
-		metaAge = *cacheMetaAge
-	}
-	metaDuration, err := time.ParseDuration(metaAge)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration", metaAge)
-	}
-	warmupRps := strings.Split(*cacheWarmUp, "/")
-	warmupRate, err := strconv.Atoi(warmupRps[0])
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand warm up rate", *cacheWarmUp)
-	}
-	warmupSec, err := strconv.Atoi(warmupRps[1])
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand warm up seconds", *cacheWarmUp)
-	}
 	// configure cache backend
 	if *cacheDbPurge {
 		fs.Debugf(name, "Purging the DB")
 	}
 	f := &Fs{
 		Fs:        wrappedFs,
 		name:      name,
 		root:      rpath,
 		fileAge:   infoDuration,
 		chunkSize: int64(chunkSize),
-		chunkAge:             chunkDuration,
-		metaAge:              metaDuration,
+		chunkTotalSize:     int64(chunkTotalSize),
+		chunkCleanInterval: chunkCleanInterval,
 		readRetries:  *cacheReadRetries,
 		totalWorkers: *cacheTotalWorkers,
-		originalTotalWorkers: *cacheTotalWorkers,
+		totalMaxWorkers: *cacheTotalWorkers,
 		chunkMemory: !*cacheChunkNoMemory,
-		originalChunkMemory: !*cacheChunkNoMemory,
-		warmUp:      false,
-		warmUpRate:  warmupRate,
-		warmUpSec:   warmupSec,
 		cacheWrites: *cacheStoreWrites,
 		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
 		lastRootCleanup:  time.Now().Truncate(time.Hour * 24 * 30),
-		lastOpenedEntries: make(map[string]time.Time),
 	}
+	if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) {
+		return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+			f.chunkTotalSize, f.chunkSize, f.totalWorkers)
+	}
 	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(*cacheRps)), f.totalWorkers)
 
+	f.plexConnector = &plexConnector{}
+	if plexURL != "" {
+		usingPlex := false
+
+		if plexToken != "" {
+			f.plexConnector, err = newPlexConnectorWithToken(f, plexURL, plexToken)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
+			}
+			usingPlex = true
+		} else {
+			plexUsername := fs.ConfigFileGet(name, "plex_username")
+			plexPassword := fs.ConfigFileGet(name, "plex_password")
+			if plexPassword != "" && plexUsername != "" {
+				decPass, err := fs.Reveal(plexPassword)
+				if err != nil {
+					decPass = plexPassword
+				}
+				f.plexConnector, err = newPlexConnector(f, plexURL, plexUsername, decPass)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
+				}
+				if f.plexConnector.token != "" {
+					fs.ConfigFileSet(name, "plex_token", f.plexConnector.token)
+					fs.SaveConfig()
+				}
+				usingPlex = true
+			}
+		}
+
+		if usingPlex {
+			fs.Infof(name, "Connected to Plex server: %v", plexURL)
+			// when connected to a Plex server we default to 1 worker (Plex scans all the time)
+			// and leave max workers as a setting to scale out the workers on demand during playback
+			f.totalWorkers = 1
+		}
+	}
+
 	dbPath := *cacheDbPath
 	if filepath.Ext(dbPath) != "" {
 		dbPath = filepath.Dir(dbPath)
@@ -331,7 +348,9 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 
 	dbPath = filepath.Join(dbPath, name+".db")
 	fs.Infof(name, "Storage DB path: %v", dbPath)
-	f.cache, err = GetPersistent(dbPath, *cacheDbPurge)
+	f.cache, err = GetPersistent(dbPath, &Features{
+		PurgeDb: *cacheDbPurge,
+	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to start cache db")
 	}
@@ -348,9 +367,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 
 	fs.Infof(name, "Chunk Memory: %v", f.chunkMemory)
 	fs.Infof(name, "Chunk Size: %v", fs.SizeSuffix(f.chunkSize))
+	fs.Infof(name, "Chunk Total Size: %v", fs.SizeSuffix(f.chunkTotalSize))
+	fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String())
 	fs.Infof(name, "Workers: %v", f.totalWorkers)
 	fs.Infof(name, "File Age: %v", f.fileAge.String())
-	fs.Infof(name, "Chunk Age: %v", f.chunkAge.String())
 	fs.Infof(name, "Cache Writes: %v", f.cacheWrites)
 
 	go f.CleanUpCache(false)
@@ -412,35 +432,6 @@ func (f *Fs) ChunkSize() int64 {
 	return f.chunkSize
 }
 
-// originalSettingWorkers will return the original value of this config
-func (f *Fs) originalSettingWorkers() int {
-	return f.originalTotalWorkers
-}
-
-// originalSettingChunkNoMemory will return the original value of this config
-func (f *Fs) originalSettingChunkNoMemory() bool {
-	return f.originalChunkMemory
-}
-
-// InWarmUp says if cache warm up is active
-func (f *Fs) InWarmUp() bool {
-	return f.warmUp
-}
-
-// enableWarmUp will enable the warm up state of this cache along with the relevant settings
-func (f *Fs) enableWarmUp() {
-	f.totalWorkers = 1
-	f.chunkMemory = false
-	f.warmUp = true
-}
-
-// disableWarmUp will disable the warm up state of this cache along with the relevant settings
-func (f *Fs) disableWarmUp() {
-	f.totalWorkers = f.originalSettingWorkers()
-	f.chunkMemory = !f.originalSettingChunkNoMemory()
-	f.warmUp = false
-}
-
 // NewObject finds the Object at remote.
 func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	co := NewObject(f, remote)
@@ -668,7 +659,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 		// if we have some bytes we cache them
 		if readSize > 0 {
 			chunk = chunk[:readSize]
-			err2 := f.cache.AddChunkAhead(cleanPath(path.Join(f.root, src.Remote())), chunk, offset, f.metaAge)
+			err2 := f.cache.AddChunk(cleanPath(path.Join(f.root, src.Remote())), chunk, offset)
 			if err2 != nil {
 				fs.Errorf(src, "error saving new data in cache '%v'", err2)
 				_ = pr.CloseWithError(err2)
@@ -840,10 +831,6 @@ func (f *Fs) Purge() error {
 	fs.Infof(f, "purging cache")
 	f.cache.Purge()
 
-	f.warmupMu.Lock()
-	defer f.warmupMu.Unlock()
-	f.lastOpenedEntries = make(map[string]time.Time)
-
 	do := f.Fs.Features().Purge
 	if do == nil {
 		return nil
@@ -892,45 +879,17 @@ func (f *Fs) OpenRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, e
 	return fn()
 }
 
-// CheckIfWarmupNeeded changes the FS settings during warmups
-func (f *Fs) CheckIfWarmupNeeded(remote string) {
-	f.warmupMu.Lock()
-	defer f.warmupMu.Unlock()
-
-	secondCount := time.Duration(f.warmUpSec)
-	rate := f.warmUpRate
-
-	// clean up entries older than the needed time frame needed
-	for k, v := range f.lastOpenedEntries {
-		if time.Now().After(v.Add(time.Second * secondCount)) {
-			delete(f.lastOpenedEntries, k)
-		}
-	}
-	f.lastOpenedEntries[remote] = time.Now()
-
-	// simple check for the current load
-	if len(f.lastOpenedEntries) >= rate && !f.warmUp {
-		fs.Infof(f, "turning on cache warmup")
-		f.enableWarmUp()
-	} else if len(f.lastOpenedEntries) < rate && f.warmUp {
-		fs.Infof(f, "turning off cache warmup")
-		f.disableWarmUp()
-	}
-}
-
 // CleanUpCache will cleanup only the cache data that is expired
 func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	f.cleanupMu.Lock()
 	defer f.cleanupMu.Unlock()
 
-	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkAge/4)) {
-		fs.Infof("cache", "running chunks cleanup")
-		f.cache.CleanChunksByAge(f.chunkAge)
+	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkCleanInterval)) {
+		f.cache.CleanChunksBySize(f.chunkTotalSize)
 		f.lastChunkCleanup = time.Now()
 	}
 
 	if ignoreLastTs || time.Now().After(f.lastRootCleanup.Add(f.fileAge/4)) {
-		fs.Infof("cache", "running root cleanup")
 		f.cache.CleanEntriesByAge(f.fileAge)
 		f.lastRootCleanup = time.Now()
 	}
@@ -951,6 +910,15 @@ func (f *Fs) SetWrapper(wrapper fs.Fs) {
 	f.wrapper = wrapper
 }
 
+// Wrap returns the Fs that is wrapping this Fs
+func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
+	if f.wrapper == nil {
+		return nil, false
+	}
+	c, ok := f.wrapper.(*crypt.Fs)
+	return c, ok
+}
+
 // DirCacheFlush flushes the dir cache
 func (f *Fs) DirCacheFlush() {
 	_ = f.cache.RemoveDir("")
cache/cache_internal_test.go (vendored, 224 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7
 
 package cache_test
 
@@ -24,18 +24,14 @@ import (
 )
 
 var (
 	WrapRemote = flag.String("wrap-remote", "", "Remote to wrap")
 	RemoteName = flag.String("remote-name", "TestCacheInternal", "Root remote")
-	SkipTimeouts = flag.Bool("skip-waits", false, "To skip tests that have wait times")
-	rootFs       fs.Fs
-	boltDb       *cache.Persistent
-	metaAge      = time.Second * 30
-	infoAge      = time.Second * 10
-	chunkAge     = time.Second * 10
-	okDiff       = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
-	workers      = 2
-	warmupRate   = 3
-	warmupSec    = 10
+	rootFs     fs.Fs
+	boltDb     *cache.Persistent
+	infoAge    = time.Second * 10
+	chunkClean = time.Second
+	okDiff     = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
+	workers    = 2
 )
 
 // prepare the test server and return a function to tidy it up afterwards
@@ -44,7 +40,7 @@ func TestInternalInit(t *testing.T) {
 
 	// delete the default path
 	dbPath := filepath.Join(fs.CacheDir, "cache-backend", *RemoteName+".db")
-	boltDb, err = cache.GetPersistent(dbPath, true)
+	boltDb, err = cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 	fstest.Initialise()
 
@@ -65,17 +61,17 @@ func TestInternalInit(t *testing.T) {
 		fs.ConfigFileSet(*RemoteName, "type", "cache")
 		fs.ConfigFileSet(*RemoteName, "remote", *WrapRemote)
 		fs.ConfigFileSet(*RemoteName, "chunk_size", "1024")
-		fs.ConfigFileSet(*RemoteName, "chunk_age", chunkAge.String())
+		fs.ConfigFileSet(*RemoteName, "chunk_total_size", "2048")
 		fs.ConfigFileSet(*RemoteName, "info_age", infoAge.String())
 	}
 
-	_ = flag.Set("cache-warm-up-age", metaAge.String())
-	_ = flag.Set("cache-warm-up-rps", fmt.Sprintf("%v/%v", warmupRate, warmupSec))
 	_ = flag.Set("cache-chunk-no-memory", "true")
 	_ = flag.Set("cache-workers", strconv.Itoa(workers))
+	_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
 
 	// Instantiate root
 	rootFs, err = fs.NewFs(*RemoteName + ":")
+	require.NoError(t, err)
 	_ = rootFs.Features().Purge()
 	require.NoError(t, err)
 	err = rootFs.Mkdir("")
@@ -305,143 +301,6 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	require.Equal(t, o.ModTime(), co.ModTime())
 }
 
-func TestInternalWarmUp(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
-	reset(t)
-	cfs, err := getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	o1 := writeObjectRandomBytes(t, rootFs, (chunkSize * 3))
-	o2 := writeObjectRandomBytes(t, rootFs, (chunkSize * 4))
-	o3 := writeObjectRandomBytes(t, rootFs, (chunkSize * 6))
-
-	_ = readDataFromObj(t, o1, 0, chunkSize, false)
-	_ = readDataFromObj(t, o2, 0, chunkSize, false)
-
-	// validate a fresh chunk
-	expectedExpiry := time.Now().Add(chunkAge)
-	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), o2.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we entered a warm up state
-	_ = readDataFromObj(t, o3, 0, chunkSize, false)
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(metaAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we cooled down and exit warm up
-	// we wait for the cache to expire
-	t.Logf("Waiting 10 seconds for warm up to expire\n")
-	time.Sleep(time.Second * 10)
-
-	_ = readDataFromObj(t, o3, chunkSize, chunkSize*2, false)
-	require.False(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), chunkSize)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-}
-
-func TestInternalWarmUpInFlight(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
-	reset(t)
-	cfs, err := getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	o1 := writeObjectRandomBytes(t, rootFs, (chunkSize * 3))
-	o2 := writeObjectRandomBytes(t, rootFs, (chunkSize * 4))
-	o3 := writeObjectRandomBytes(t, rootFs, (chunkSize * int64(workers) * int64(2)))
-
-	_ = readDataFromObj(t, o1, 0, chunkSize, false)
-	_ = readDataFromObj(t, o2, 0, chunkSize, false)
-	require.False(t, cfs.InWarmUp())
-
-	// validate that we entered a warm up state
-	_ = readDataFromObj(t, o3, 0, chunkSize, false)
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry := time.Now().Add(metaAge)
-	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	checkSample := make([]byte, chunkSize)
-	reader, err := o3.Open(&fs.SeekOption{Offset: 0})
-	require.NoError(t, err)
-	rs, ok := reader.(*cache.Handle)
-	require.True(t, ok)
-
-	for i := 0; i <= workers; i++ {
-		_, _ = rs.Seek(int64(i)*chunkSize, 0)
-		_, err = io.ReadFull(reader, checkSample)
-		require.NoError(t, err)
-
-		if i == workers {
-			require.False(t, rs.InWarmUp(), "iteration %v", i)
-		} else {
-			require.True(t, rs.InWarmUp(), "iteration %v", i)
-		}
-	}
-	_ = reader.Close()
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), chunkSize*int64(workers+1))
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we cooled down and exit warm up
-	// we wait for the cache to expire
-	t.Logf("Waiting 10 seconds for warm up to expire\n")
-	time.Sleep(time.Second * 10)
-
-	_ = readDataFromObj(t, o2, chunkSize, chunkSize*2, false)
-	require.False(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o2.Remote()), chunkSize)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-}
-
-// TODO: this is bugged
-//func TestInternalRateLimiter(t *testing.T) {
-//	reset(t)
-//	_ = flag.Set("cache-rps", "2")
-//	rootFs, err := fs.NewFs(*RemoteName + ":")
-//	require.NoError(t, err)
-//	defer func() {
-//		_ = flag.Set("cache-rps", "-1")
-//		rootFs, err = fs.NewFs(*RemoteName + ":")
-//		require.NoError(t, err)
-//	}()
-//	cfs, err := getCacheFs(rootFs)
-//	require.NoError(t, err)
-//	chunkSize := cfs.ChunkSize()
-//
-//	// create some rand test data
-//	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
-//
-//	doStuff(t, 5, time.Second, func() {
-//		r, err := co.Open(&fs.SeekOption{Offset: chunkSize + 1})
-//		require.NoError(t, err)
-//
-//		buf := make([]byte, chunkSize)
-//		totalRead, err := io.ReadFull(r, buf)
-//		require.NoError(t, err)
-//		require.Equal(t, len(buf), totalRead)
-//		_ = r.Close()
-//	})
-//}
-
 func TestInternalCacheWrites(t *testing.T) {
 	reset(t)
 	_ = flag.Set("cache-writes", "true")
@@ -453,10 +312,10 @@ func TestInternalCacheWrites(t *testing.T) {
 
 	// create some rand test data
 	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
-	expectedExpiry := time.Now().Add(metaAge)
+	expectedTs := time.Now()
 	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
 	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
+	require.WithinDuration(t, expectedTs, ts, okDiff)
 
 	// reset fs
 	_ = flag.Set("cache-writes", "false")
@@ -464,43 +323,44 @@ func TestInternalCacheWrites(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestInternalExpiredChunkRemoved(t *testing.T) {
-	t.Skip("FIXME disabled because it is unreliable")
-
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
+func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	reset(t)
+	_ = flag.Set("cache-workers", "1")
+	rootFs, err := fs.NewFs(*RemoteName + ":")
+	require.NoError(t, err)
 	cfs, err := getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 	totalChunks := 20
 
 	// create some rand test data
-	co := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
-	remote := co.Remote()
-	// cache all the chunks
-	_ = readDataFromObj(t, co, 0, co.Size(), false)
-
-	// we wait for the cache to expire
-	t.Logf("Waiting %v for cache to expire\n", chunkAge.String())
-	time.Sleep(chunkAge)
-	_, _ = cfs.List("")
-	time.Sleep(time.Second * 2)
-
-	o, err := cfs.NewObject(remote)
-	require.NoError(t, err)
-	co2, ok := o.(*cache.Object)
+	o := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
+	co, ok := o.(*cache.Object)
 	require.True(t, ok)
-	require.False(t, boltDb.HasChunk(co2, 0))
+
+	for i := 0; i < 4; i++ { // read first 4
+		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+	}
+	cfs.CleanUpCache(true)
+	// the last 2 **must** be in the cache
+	require.True(t, boltDb.HasChunk(co, chunkSize*2))
+	require.True(t, boltDb.HasChunk(co, chunkSize*3))
+
+	for i := 4; i < 6; i++ { // read next 2
+		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+	}
+	cfs.CleanUpCache(true)
+	// the last 2 **must** be in the cache
+	require.True(t, boltDb.HasChunk(co, chunkSize*4))
+	require.True(t, boltDb.HasChunk(co, chunkSize*5))
+
+	// reset fs
+	_ = flag.Set("cache-workers", strconv.Itoa(workers))
+	rootFs, err = fs.NewFs(*RemoteName + ":")
+	require.NoError(t, err)
 }
 
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
 	reset(t)
 	cfs, err := getCacheFs(rootFs)
 	require.NoError(t, err)
cache/cache_test.go (vendored, 2 changed lines)

@@ -3,7 +3,7 @@
 // Automatically generated - DO NOT EDIT
 // Regenerate with: make gen_tests
 
-// +build !plan9
+// +build !plan9,go1.7
 
 package cache_test
 
cache/cache_unsupported.go (vendored, 2 changed lines)

@@ -1,6 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build plan9
+// +build plan9 !go1.7
 
 package cache
cache/directory.go (vendored, 2 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7
 
 package cache
 
253
cache/handle.go
vendored
253
cache/handle.go
vendored
@ -1,4 +1,4 @@
|
|||||||
// +build !plan9
|
// +build !plan9,go1.7
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
@ -15,21 +15,19 @@ import (
|
|||||||
|
|
||||||
// Handle is managing the read/write/seek operations on an open handle
|
// Handle is managing the read/write/seek operations on an open handle
|
||||||
type Handle struct {
|
type Handle struct {
|
||||||
cachedObject *Object
|
cachedObject *Object
|
||||||
memory ChunkStorage
|
memory ChunkStorage
|
||||||
preloadQueue chan int64
|
preloadQueue chan int64
|
||||||
preloadOffset int64
|
preloadOffset int64
|
||||||
offset int64
|
offset int64
|
||||||
seenOffsets map[int64]bool
|
seenOffsets map[int64]bool
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
confirmReading chan bool
|
||||||
|
|
||||||
ReadRetries int
|
UseMemory bool
|
||||||
TotalWorkers int
|
workers []*worker
|
||||||
UseMemory bool
|
closed bool
|
||||||
workers []*worker
|
reading bool
|
||||||
chunkAge time.Duration
|
|
||||||
warmup bool
|
|
||||||
closed bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewObjectHandle returns a new Handle for an existing Object
|
// NewObjectHandle returns a new Handle for an existing Object
|
||||||
@ -39,20 +37,15 @@ func NewObjectHandle(o *Object) *Handle {
|
|||||||
offset: 0,
|
offset: 0,
|
||||||
preloadOffset: -1, // -1 to trigger the first preload
|
preloadOffset: -1, // -1 to trigger the first preload
|
||||||
|
|
||||||
ReadRetries: o.CacheFs.readRetries,
|
UseMemory: o.CacheFs.chunkMemory,
|
||||||
TotalWorkers: o.CacheFs.totalWorkers,
|
reading: false,
|
||||||
UseMemory: o.CacheFs.chunkMemory,
|
|
||||||
chunkAge: o.CacheFs.chunkAge,
|
|
||||||
warmup: o.CacheFs.InWarmUp(),
|
|
||||||
}
|
}
|
||||||
r.seenOffsets = make(map[int64]bool)
|
r.seenOffsets = make(map[int64]bool)
|
||||||
r.memory = NewMemory(o.CacheFs.chunkAge)
|
r.memory = NewMemory(-1)
|
||||||
if o.CacheFs.InWarmUp() {
|
|
||||||
r.chunkAge = o.CacheFs.metaAge
|
|
||||||
}
|
|
||||||
|
|
||||||
// create a larger buffer to queue up requests
|
// create a larger buffer to queue up requests
|
||||||
r.preloadQueue = make(chan int64, r.TotalWorkers*10)
|
r.preloadQueue = make(chan int64, o.CacheFs.totalWorkers*10)
|
||||||
|
r.confirmReading = make(chan bool)
|
||||||
r.startReadWorkers()
|
r.startReadWorkers()
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
@ -72,34 +65,86 @@ func (r *Handle) String() string {
|
|||||||
return r.cachedObject.abs()
|
return r.cachedObject.abs()
|
||||||
}
|
}
|
||||||
|
|
||||||
// InWarmUp says if this handle is in warmup mode
|
|
||||||
func (r *Handle) InWarmUp() bool {
|
|
||||||
return r.warmup
|
|
||||||
}
|
|
||||||
|
|
||||||
// startReadWorkers will start the worker pool
|
// startReadWorkers will start the worker pool
|
||||||
func (r *Handle) startReadWorkers() {
|
func (r *Handle) startReadWorkers() {
|
||||||
if r.hasAtLeastOneWorker() {
|
if r.hasAtLeastOneWorker() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for i := 0; i < r.TotalWorkers; i++ {
|
r.scaleWorkers(r.cacheFs().totalWorkers)
|
||||||
w := &worker{
|
}
|
||||||
r: r,
|
|
||||||
ch: r.preloadQueue,
|
|
||||||
id: i,
|
|
||||||
}
|
|
||||||
go w.run()
|
|
||||||
|
|
||||||
r.workers = append(r.workers, w)
|
// scaleOutWorkers will increase the worker pool count by the provided amount
|
||||||
|
func (r *Handle) scaleWorkers(desired int) {
|
||||||
|
current := len(r.workers)
|
||||||
|
if current == desired {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
if current > desired {
|
||||||
|
// scale in gracefully
|
||||||
|
for i := 0; i < current-desired; i++ {
|
||||||
|
r.preloadQueue <- -1
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// scale out
|
||||||
|
for i := 0; i < desired-current; i++ {
|
||||||
|
w := &worker{
|
||||||
|
r: r,
|
||||||
|
ch: r.preloadQueue,
|
||||||
|
id: current + i,
|
||||||
|
}
|
||||||
|
go w.run()
|
||||||
|
|
||||||
|
r.workers = append(r.workers, w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// ignore first scale out from 0
|
||||||
|
if current != 0 {
|
||||||
|
fs.Infof(r, "scale workers to %v", desired)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Handle) requestExternalConfirmation() {
|
||||||
|
// if there's no external confirmation available
|
||||||
|
// then we skip this step
|
||||||
|
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
|
||||||
|
!r.cacheFs().plexConnector.isConnected() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
go r.cacheFs().plexConnector.isPlayingAsync(r.cachedObject, r.confirmReading)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Handle) confirmExternalReading() {
|
||||||
|
// if we have a max value of workers
|
||||||
|
// or there's no external confirmation available
|
||||||
|
// then we skip this step
|
||||||
|
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
|
||||||
|
!r.cacheFs().plexConnector.isConnected() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case confirmed := <-r.confirmReading:
|
||||||
|
if !confirmed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.Infof(r, "confirmed reading by external reader")
|
||||||
|
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
|
||||||
}
|
}
|
||||||
|
|
||||||
// queueOffset will send an offset to the workers if it's different from the last one
|
// queueOffset will send an offset to the workers if it's different from the last one
|
||||||
func (r *Handle) queueOffset(offset int64) {
|
func (r *Handle) queueOffset(offset int64) {
|
||||||
if offset != r.preloadOffset {
|
if offset != r.preloadOffset {
|
||||||
|
// clean past in-memory chunks
|
||||||
|
if r.UseMemory {
|
||||||
|
go r.memory.CleanChunksByNeed(offset)
|
||||||
|
}
|
||||||
|
go r.cacheFs().CleanUpCache(false)
|
||||||
|
r.confirmExternalReading()
|
||||||
r.preloadOffset = offset
|
r.preloadOffset = offset
|
||||||
previousChunksCounter := 0
|
|
||||||
maxOffset := r.cacheFs().chunkSize * int64(r.cacheFs().originalSettingWorkers())
|
|
||||||
|
|
||||||
// clear the past seen chunks
|
// clear the past seen chunks
|
||||||
// they will remain in our persistent storage but will be removed from transient
|
// they will remain in our persistent storage but will be removed from transient
|
||||||
@ -107,25 +152,10 @@ func (r *Handle) queueOffset(offset int64) {
|
|||||||
for k := range r.seenOffsets {
|
for k := range r.seenOffsets {
|
||||||
if k < offset {
|
if k < offset {
|
||||||
r.seenOffsets[k] = false
|
r.seenOffsets[k] = false
|
||||||
|
|
||||||
// we count how many continuous chunks were seen before
|
|
||||||
if offset >= maxOffset && k >= offset-maxOffset {
|
|
||||||
previousChunksCounter++
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we read all the previous chunks that could have been preloaded
|
for i := 0; i < len(r.workers); i++ {
|
||||||
// we should then disable warm up setting for this handle
|
|
||||||
if r.warmup && previousChunksCounter >= r.cacheFs().originalSettingWorkers() {
|
|
||||||
r.TotalWorkers = r.cacheFs().originalSettingWorkers()
|
|
||||||
r.UseMemory = !r.cacheFs().originalSettingChunkNoMemory()
|
|
||||||
r.chunkAge = r.cacheFs().chunkAge
|
|
||||||
r.warmup = false
|
|
||||||
fs.Infof(r, "disabling warm up")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < r.TotalWorkers; i++ {
|
|
||||||
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
|
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
|
||||||
if o < 0 || o >= r.cachedObject.Size() {
|
if o < 0 || o >= r.cachedObject.Size() {
|
||||||
continue
|
continue
|
||||||
@ -137,6 +167,8 @@ func (r *Handle) queueOffset(offset int64) {
|
|||||||
r.seenOffsets[o] = true
|
r.seenOffsets[o] = true
|
||||||
r.preloadQueue <- o
|
r.preloadQueue <- o
|
||||||
}
|
}
|
||||||
|
|
||||||
|
r.requestExternalConfirmation()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -157,12 +189,6 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
var data []byte
|
var data []byte
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
// we reached the end of the file
|
|
||||||
if chunkStart >= r.cachedObject.Size() {
|
|
||||||
fs.Debugf(r, "reached EOF %v", chunkStart)
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// we calculate the modulus of the requested offset with the size of a chunk
|
// we calculate the modulus of the requested offset with the size of a chunk
|
||||||
offset := chunkStart % r.cacheFs().chunkSize
|
offset := chunkStart % r.cacheFs().chunkSize
|
||||||
|
|
||||||
@ -171,10 +197,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
found := false
|
found := false
|
||||||
|
|
||||||
// delete old chunks from memory
|
|
||||||
if r.UseMemory {
|
if r.UseMemory {
|
||||||
go r.memory.CleanChunksByNeed(chunkStart)
|
|
||||||
|
|
||||||
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
|
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
found = true
|
found = true
|
||||||
@ -184,16 +207,15 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
if !found {
|
if !found {
|
||||||
// we're gonna give the workers a chance to pickup the chunk
|
// we're gonna give the workers a chance to pickup the chunk
|
||||||
// and retry a couple of times
|
// and retry a couple of times
|
||||||
for i := 0; i < r.ReadRetries; i++ {
|
for i := 0; i < r.cacheFs().readRetries*2; i++ {
|
||||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
|
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Millisecond * 500)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -222,17 +244,24 @@ func (r *Handle) Read(p []byte) (n int, err error) {
|
|||||||
defer r.mu.Unlock()
|
defer r.mu.Unlock()
|
||||||
var buf []byte
|
var buf []byte
|
||||||
|
|
||||||
|
// first reading
|
||||||
|
if !r.reading {
|
||||||
|
r.reading = true
|
||||||
|
r.requestExternalConfirmation()
|
||||||
|
}
|
||||||
|
// reached EOF
|
||||||
|
if r.offset >= r.cachedObject.Size() {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
currentOffset := r.offset
|
currentOffset := r.offset
|
||||||
buf, err = r.getChunk(currentOffset)
|
buf, err = r.getChunk(currentOffset)
|
||||||
if err != nil && len(buf) == 0 {
|
if err != nil && len(buf) == 0 {
|
||||||
|
fs.Errorf(r, "(%v/%v) empty and error (%v) response", currentOffset, r.cachedObject.Size(), err)
|
||||||
return 0, io.EOF
|
return 0, io.EOF
|
||||||
}
|
}
|
||||||
readSize := copy(p, buf)
|
readSize := copy(p, buf)
|
||||||
newOffset := currentOffset + int64(readSize)
|
newOffset := currentOffset + int64(readSize)
|
||||||
r.offset = newOffset
|
r.offset = newOffset
|
||||||
if r.offset >= r.cachedObject.Size() {
|
|
||||||
err = io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
return readSize, err
|
return readSize, err
|
||||||
}
|
}
|
||||||
@ -257,6 +286,7 @@ func (r *Handle) Close() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
go r.cacheFs().CleanUpCache(false)
|
||||||
fs.Debugf(r, "cache reader closed %v", r.offset)
|
fs.Debugf(r, "cache reader closed %v", r.offset)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -313,7 +343,7 @@ func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {
 	r := w.rc
 	if w.rc == nil {
 		r, err = w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
-			return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end}, &fs.SeekOption{Offset: offset})
+			return w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
 		})
 		if err != nil {
 			return nil, err
@@ -329,7 +359,7 @@ func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {

 	_ = w.rc.Close()
 	return w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
-		r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end}, &fs.SeekOption{Offset: offset})
+		r, err = w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
 		if err != nil {
 			return nil, err
 		}
@@ -377,7 +407,7 @@ func (w *worker) run() {
 			// add it in ram if it's in the persistent storage
 			data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
 			if err == nil {
-				err = w.r.memory.AddChunk(w.r.cachedObject, data, chunkStart)
+				err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
 				if err != nil {
 					fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
 				} else {
@@ -395,37 +425,56 @@ func (w *worker) run() {
 		if chunkEnd > w.r.cachedObject.Size() {
 			chunkEnd = w.r.cachedObject.Size()
 		}
-		w.rc, err = w.reader(chunkStart, chunkEnd)
-		// we seem to be getting only errors so we abort
-		if err != nil {
-			fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-			return
-		}
-
-		data = make([]byte, chunkEnd-chunkStart)
-		sourceRead := 0
-		sourceRead, err = io.ReadFull(w.rc, data)
-		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
-			fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-			return
-		}
-		if err == io.ErrUnexpectedEOF {
-			fs.Debugf(w, "partial read chunk %v: %v", chunkStart, err)
-		}
-		data = data[:sourceRead] // reslice to remove extra garbage
-		fs.Debugf(w, "downloaded chunk %v", fs.SizeSuffix(chunkStart))
-
-		if w.r.UseMemory {
-			err = w.r.memory.AddChunk(w.r.cachedObject, data, chunkStart)
-			if err != nil {
-				fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
-			}
-		}
-
-		err = w.r.storage().AddChunkAhead(w.r.cachedObject.abs(), data, chunkStart, w.r.chunkAge)
-		if err != nil {
-			fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
-		}
+		w.download(chunkStart, chunkEnd, 0)
+	}
+}
+
+func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
+	var err error
+	var data []byte
+
+	// stop retries
+	if retry >= w.r.cacheFs().readRetries {
+		return
+	}
+	// back-off between retries
+	if retry > 0 {
+		time.Sleep(time.Second * time.Duration(retry))
+	}
+
+	w.rc, err = w.reader(chunkStart, chunkEnd)
+	// we seem to be getting only errors so we abort
+	if err != nil {
+		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
+		w.download(chunkStart, chunkEnd, retry+1)
+		return
+	}
+
+	data = make([]byte, chunkEnd-chunkStart)
+	sourceRead := 0
+	sourceRead, err = io.ReadFull(w.rc, data)
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
+		w.download(chunkStart, chunkEnd, retry+1)
+		return
+	}
+	if err == io.ErrUnexpectedEOF {
+		fs.Debugf(w, "partial read chunk %v: %v", chunkStart, err)
+	}
+	data = data[:sourceRead] // reslice to remove extra garbage
+	fs.Debugf(w, "downloaded chunk %v", fs.SizeSuffix(chunkStart))
+
+	if w.r.UseMemory {
+		err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
+		if err != nil {
+			fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
+		}
+	}
+
+	err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
+	if err != nil {
+		fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
 	}
 }
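The rewritten worker above folds the chunk fetch into a `download` helper that retries itself with a linear back-off (attempt number times one second) and gives up after `readRetries` attempts (the new default is 10). A minimal, self-contained sketch of that retry shape; `fetchChunk` is a purely illustrative stand-in for the worker's reader, not part of the rclone code:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchChunk stands in for one attempt at reading a chunk from the source.
// It fails twice and then succeeds, to exercise the retry path.
func fetchChunk(offset int64, attempt int) ([]byte, error) {
	if attempt < 2 {
		return nil, errors.New("transient read error")
	}
	return make([]byte, 5*1024*1024), nil
}

// downloadWithRetry mirrors the retry shape used above: give up after
// maxRetries attempts and sleep attempt*1s between attempts (linear back-off).
func downloadWithRetry(offset int64, maxRetries int) ([]byte, error) {
	for attempt := 0; attempt < maxRetries; attempt++ {
		if attempt > 0 {
			time.Sleep(time.Second * time.Duration(attempt))
		}
		data, err := fetchChunk(offset, attempt)
		if err == nil {
			return data, nil
		}
		fmt.Printf("chunk %v attempt %v failed: %v\n", offset, attempt, err)
	}
	return nil, fmt.Errorf("chunk %v: all %v attempts failed", offset, maxRetries)
}

func main() {
	data, err := downloadWithRetry(0, 10)
	fmt.Println(len(data), err)
}
```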
cache/object.go (vendored, 14 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7

 package cache

@@ -205,16 +205,18 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	if err := o.refreshFromSource(); err != nil {
 		return nil, err
 	}
-	o.CacheFs.CheckIfWarmupNeeded(o.Remote())

+	var err error
 	cacheReader := NewObjectHandle(o)
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
-			_, err := cacheReader.Seek(x.Offset, os.SEEK_SET)
-			if err != nil {
-				return cacheReader, err
-			}
+			_, err = cacheReader.Seek(x.Offset, os.SEEK_SET)
+		case *fs.RangeOption:
+			_, err = cacheReader.Seek(x.Start, os.SEEK_SET)
 		}
+		if err != nil {
+			return cacheReader, err
+		}
 	}
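With the change above, `Open` reduces both `fs.SeekOption` and `fs.RangeOption` to a single seek on the cached handle and checks the error once after the type switch. A simplified sketch of that dispatch, using stand-in option types rather than the real `fs` ones:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// SeekOption and RangeOption are simplified stand-ins for fs.SeekOption and
// fs.RangeOption; only the fields the cache handle cares about are modelled.
type SeekOption struct{ Offset int64 }
type RangeOption struct{ Start, End int64 }

// applyOptions mirrors the shape of the Open change above: both option types
// become a single Seek on the cached reader, with one error check per option.
func applyOptions(r io.ReadSeeker, options ...interface{}) error {
	var err error
	for _, option := range options {
		switch x := option.(type) {
		case *SeekOption:
			_, err = r.Seek(x.Offset, io.SeekStart)
		case *RangeOption:
			_, err = r.Seek(x.Start, io.SeekStart)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	r := strings.NewReader("0123456789")
	if err := applyOptions(r, &RangeOption{Start: 4, End: 9}); err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(r)
	fmt.Println(string(rest)) // 456789
}
```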
cache/plex.go (vendored, new file, 229 lines)

// +build !plan9,go1.7

package cache

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
)

const (
	// defPlexLoginURL is the default URL for Plex login
	defPlexLoginURL = "https://plex.tv/users/sign_in.json"
)

// plexConnector is managing the cache integration with Plex
type plexConnector struct {
	url   *url.URL
	token string
	f     *Fs
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:     f,
		url:   u,
		token: "",
	}

	err = pc.authenticate(username, password)
	if err != nil {
		return nil, err
	}

	return pc, nil
}

// newPlexConnectorWithToken connects to a Plex server using an existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:     f,
		url:   u,
		token: token,
	}

	return pc, nil
}

// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
	req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
	req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
	req.Header.Add("X-Plex-Version", fs.Version)
	req.Header.Add("Accept", "application/json")
	if p.token != "" {
		req.Header.Add("X-Plex-Token", p.token)
	}
}

// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate(username, password string) error {
	form := url.Values{}
	form.Set("user[login]", username)
	form.Add("user[password]", password)
	req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return fmt.Errorf("failed to obtain token: %v", err)
	}
	tokenGen, ok := get(data, "user", "authToken")
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	token, ok := tokenGen.(string)
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	p.token = token

	return nil
}

// isConnected checks if this Plex connection has an authentication token
func (p *plexConnector) isConnected() bool {
	return p.token != ""
}

func (p *plexConnector) isPlaying(co *Object) bool {
	isPlaying := false
	req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
	if err != nil {
		return false
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return false
	}
	sizeGen, ok := get(data, "MediaContainer", "size")
	if !ok {
		return false
	}
	size, ok := sizeGen.(float64)
	if !ok || size < float64(1) {
		return false
	}
	videosGen, ok := get(data, "MediaContainer", "Video")
	if !ok {
		fs.Errorf("plex", "empty videos: %v", data)
		return false
	}
	videos, ok := videosGen.([]interface{})
	if !ok || len(videos) < 1 {
		fs.Errorf("plex", "empty videos: %v", data)
		return false
	}
	for _, v := range videos {
		keyGen, ok := get(v, "key")
		if !ok {
			fs.Errorf("plex", "failed to find: key")
			continue
		}
		key, ok := keyGen.(string)
		if !ok {
			fs.Errorf("plex", "failed to understand: key")
			continue
		}
		req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), key), nil)
		if err != nil {
			return false
		}
		p.fillDefaultHeaders(req)
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return false
		}
		var data map[string]interface{}
		err = json.NewDecoder(resp.Body).Decode(&data)
		if err != nil {
			return false
		}

		remote := co.Remote()
		if cr, yes := co.CacheFs.isWrappedByCrypt(); yes {
			remote, err = cr.DecryptFileName(co.Remote())
			if err != nil {
				fs.Errorf("plex", "can not decrypt wrapped file: %v", err)
				continue
			}
		}
		fpGen, ok := get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
		if !ok {
			fs.Errorf("plex", "failed to understand: %v", data)
			continue
		}
		fp, ok := fpGen.(string)
		if !ok {
			fs.Errorf("plex", "failed to understand: %v", fp)
			continue
		}
		if strings.Contains(fp, remote) {
			isPlaying = true
			break
		}
	}

	return isPlaying
}

func (p *plexConnector) isPlayingAsync(co *Object, response chan bool) {
	time.Sleep(time.Second) // FIXME random guess here
	res := p.isPlaying(co)
	response <- res
}

// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
				}
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
				}
			}
			return nil, false
		}
	}
	return m, true
}
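The `get` helper above walks a decoded JSON tree with a mixed path of map keys and slice indexes, which is how `isPlaying` digs the file path out of a Plex session response. A small usage sketch; the JSON blob here is made up and only shaped like the real response:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// get walks a decoded JSON value using a mixed path of map keys (string) and
// slice indexes (int). Same shape as the helper in cache/plex.go above.
func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
				}
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
				}
			}
			return nil, false
		}
	}
	return m, true
}

func main() {
	// A made-up, minimal response in the shape the Plex session query returns.
	blob := `{"MediaContainer":{"Metadata":[{"Media":[{"Part":[{"file":"/movies/example.mkv"}]}]}]}}`
	var data map[string]interface{}
	if err := json.Unmarshal([]byte(blob), &data); err != nil {
		panic(err)
	}
	fp, ok := get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
	fmt.Println(fp, ok) // /movies/example.mkv true
}
```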
cache/storage_memory.go (vendored, 11 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7

 package cache

@@ -58,8 +58,8 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
 }

 // AddChunk adds a new chunk of a cached object
-func (m *Memory) AddChunk(cachedObject *Object, data []byte, offset int64) error {
-	return m.AddChunkAhead(cachedObject.abs(), data, offset, time.Second)
+func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
+	return m.AddChunkAhead(fp, data, offset, time.Second)
 }

 // AddChunkAhead adds a new chunk of a cached object
@@ -93,3 +93,8 @@ func (m *Memory) CleanChunksByNeed(offset int64) {
 		}
 	}
 }
+
+// CleanChunksBySize will cleanup chunks after the total size passes a certain point
+func (m *Memory) CleanChunksBySize(maxSize int64) {
+	// NOOP
+}
cache/storage_persistent.go (vendored, 218 changed lines)

@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7

 package cache

@@ -29,11 +29,16 @@ const (
 	DataTsBucket = "dataTs"
 )

+// Features flags for this storage type
+type Features struct {
+	PurgeDb bool // purge the db before starting
+}
+
 var boltMap = make(map[string]*Persistent)
 var boltMapMx sync.Mutex

 // GetPersistent returns a single instance for the specific store
-func GetPersistent(dbPath string, refreshDb bool) (*Persistent, error) {
+func GetPersistent(dbPath string, f *Features) (*Persistent, error) {
 	// write lock to create one
 	boltMapMx.Lock()
 	defer boltMapMx.Unlock()
@@ -41,7 +46,7 @@ func GetPersistent(dbPath string, refreshDb bool) (*Persistent, error) {
 		return b, nil
 	}

-	bb, err := newPersistent(dbPath, refreshDb)
+	bb, err := newPersistent(dbPath, f)
 	if err != nil {
 		return nil, err
 	}
@@ -49,6 +54,12 @@ func GetPersistent(dbPath string, refreshDb bool) (*Persistent, error) {
 	return boltMap[dbPath], nil
 }

+type chunkInfo struct {
+	Path   string
+	Offset int64
+	Size   int64
+}
+
 // Persistent is a wrapper of persistent storage for a bolt.DB file
 type Persistent struct {
 	Storage
@@ -57,18 +68,20 @@ type Persistent struct {
 	dataPath   string
 	db         *bolt.DB
 	cleanupMux sync.Mutex
+	features   *Features
 }

 // newPersistent builds a new wrapper and connects to the bolt.DB file
-func newPersistent(dbPath string, refreshDb bool) (*Persistent, error) {
+func newPersistent(dbPath string, f *Features) (*Persistent, error) {
 	dataPath := strings.TrimSuffix(dbPath, filepath.Ext(dbPath))

 	b := &Persistent{
 		dbPath:   dbPath,
 		dataPath: dataPath,
+		features: f,
 	}

-	err := b.Connect(refreshDb)
+	err := b.Connect()
 	if err != nil {
 		fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err)
 		return nil, err
@@ -84,11 +97,11 @@ func (b *Persistent) String() string {

 // Connect creates a connection to the configured file
 // refreshDb will delete the file before to create an empty DB if it's set to true
-func (b *Persistent) Connect(refreshDb bool) error {
+func (b *Persistent) Connect() error {
 	var db *bolt.DB
 	var err error

-	if refreshDb {
+	if b.features.PurgeDb {
 		err := os.Remove(b.dbPath)
 		if err != nil {
 			fs.Errorf(b, "failed to remove cache file: %v", err)
@@ -146,37 +159,6 @@ func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *b
 	return bucket
 }

-// updateChunkTs is a convenience method to update a chunk timestamp to mark that it was used recently
-func (b *Persistent) updateChunkTs(tx *bolt.Tx, path string, offset int64, t time.Duration) {
-	tsBucket := tx.Bucket([]byte(DataTsBucket))
-	tsVal := path + "-" + strconv.FormatInt(offset, 10)
-	ts := time.Now().Add(t)
-	found := false
-
-	// delete previous timestamps for the same object
-	c := tsBucket.Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		if bytes.Equal(v, []byte(tsVal)) {
-			if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found {
-				found = true
-				continue
-			}
-			err := c.Delete()
-			if err != nil {
-				fs.Debugf(path, "failed to clean chunk: %v", err)
-			}
-		}
-	}
-	if found {
-		return
-	}
-
-	err := tsBucket.Put(itob(ts.UnixNano()), []byte(tsVal))
-	if err != nil {
-		fs.Debugf(path, "failed to timestamp chunk: %v", err)
-	}
-}
-
 // updateRootTs is a convenience method to update an object timestamp to mark that it was used recently
 func (b *Persistent) updateRootTs(tx *bolt.Tx, path string, t time.Duration) {
 	tsBucket := tx.Bucket([]byte(RootTsBucket))
@@ -433,7 +415,6 @@ func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool {

 // GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
 func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
-	p := cachedObject.abs()
 	var data []byte

 	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
@@ -442,31 +423,11 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 		return nil, err
 	}

-	d := cachedObject.CacheFs.chunkAge
-	if cachedObject.CacheFs.InWarmUp() {
-		d = cachedObject.CacheFs.metaAge
-	}
-
-	err = b.db.Update(func(tx *bolt.Tx) error {
-		b.updateChunkTs(tx, p, offset, d)
-		return nil
-	})
-
 	return data, err
 }

 // AddChunk adds a new chunk of a cached object
-func (b *Persistent) AddChunk(cachedObject *Object, data []byte, offset int64) error {
-	t := cachedObject.CacheFs.chunkAge
-	if cachedObject.CacheFs.InWarmUp() {
-		t = cachedObject.CacheFs.metaAge
-	}
-	return b.AddChunkAhead(cachedObject.abs(), data, offset, t)
-}
-
-// AddChunkAhead adds a new chunk before caching an Object for it
-// see fs.cacheWrites
-func (b *Persistent) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
+func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
@@ -476,47 +437,101 @@ func (b *Persistent) AddChunkAhead(fp string, data []byte, offset int64, t time.
 	}

 	return b.db.Update(func(tx *bolt.Tx) error {
-		b.updateChunkTs(tx, fp, offset, t)
+		tsBucket := tx.Bucket([]byte(DataTsBucket))
+		ts := time.Now()
+		found := false
+
+		// delete (older) timestamps for the same object
+		c := tsBucket.Cursor()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			var ci chunkInfo
+			err = json.Unmarshal(v, &ci)
+			if err != nil {
+				continue
+			}
+			if ci.Path == fp && ci.Offset == offset {
+				if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found {
+					found = true
+					continue
+				}
+				err := c.Delete()
+				if err != nil {
+					fs.Debugf(fp, "failed to clean chunk: %v", err)
+				}
+			}
+		}
+		// don't overwrite if a newer one is already there
+		if found {
+			return nil
+		}
+		enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))})
+		if err != nil {
+			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
+		}
+		err = tsBucket.Put(itob(ts.UnixNano()), enc)
+		if err != nil {
+			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
+		}
 		return nil
 	})
 }

 // CleanChunksByAge will cleanup on a cron basis
 func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) {
+	// NOOP
+}
+
+// CleanChunksByNeed is a noop for this implementation
+func (b *Persistent) CleanChunksByNeed(offset int64) {
+	// noop: we want to clean a Bolt DB by time only
+}
+
+// CleanChunksBySize will cleanup chunks after the total size passes a certain point
+func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	b.cleanupMux.Lock()
 	defer b.cleanupMux.Unlock()
 	var cntChunks int

 	err := b.db.Update(func(tx *bolt.Tx) error {
-		min := itob(0)
-		max := itob(time.Now().UnixNano())
-
 		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
 		if dataTsBucket == nil {
 			return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
 		}
 		// iterate through ts
 		c := dataTsBucket.Cursor()
-		for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
-			if v == nil {
-				continue
-			}
-			// split to get (abs path - offset)
-			val := string(v[:])
-			sepIdx := strings.LastIndex(val, "-")
-			pathCmp := val[:sepIdx]
-			offsetCmp := val[sepIdx+1:]
-
-			// delete this ts entry
-			err := c.Delete()
-			if err != nil {
-				fs.Errorf(pathCmp, "failed deleting chunk ts during cleanup (%v): %v", val, err)
-				continue
-			}
-
-			err = os.Remove(path.Join(b.dataPath, pathCmp, offsetCmp))
-			if err == nil {
-				cntChunks = cntChunks + 1
+		totalSize := int64(0)
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			var ci chunkInfo
+			err := json.Unmarshal(v, &ci)
+			if err != nil {
+				continue
+			}
+			totalSize += ci.Size
+		}
+
+		if totalSize > maxSize {
+			needToClean := totalSize - maxSize
+			for k, v := c.First(); k != nil; k, v = c.Next() {
+				var ci chunkInfo
+				err := json.Unmarshal(v, &ci)
+				if err != nil {
+					continue
+				}
+				// delete this ts entry
+				err = c.Delete()
+				if err != nil {
+					fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err)
+					continue
+				}
+				err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10)))
+				if err == nil {
+					cntChunks++
+					needToClean -= ci.Size
+					if needToClean <= 0 {
+						break
+					}
+				}
 			}
 		}
 		fs.Infof("cache", "deleted (%v) chunks", cntChunks)
@@ -576,11 +591,6 @@ func (b *Persistent) CleanEntriesByAge(entryAge time.Duration) {
 		}
 	}
 }

-// CleanChunksByNeed is a noop for this implementation
-func (b *Persistent) CleanChunksByNeed(offset int64) {
-	// noop: we want to clean a Bolt DB by time only
-}
-
 // Stats returns a go map with the stats key values
 func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
 	r := make(map[string]map[string]interface{})
@@ -590,6 +600,7 @@ func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
+	r["data"]["total-size"] = int64(0)
 	r["files"] = make(map[string]interface{})
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
@@ -612,19 +623,32 @@ func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
 		r["files"]["total-files"] = totalFiles

 		c := dataTsBucket.Cursor()
+
+		totalChunks := 0
+		totalSize := int64(0)
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			var ci chunkInfo
+			err := json.Unmarshal(v, &ci)
+			if err != nil {
+				continue
+			}
+			totalChunks++
+			totalSize += ci.Size
+		}
+		r["data"]["total-chunks"] = totalChunks
+		r["data"]["total-size"] = totalSize
+
 		if k, v := c.First(); k != nil {
-			// split to get (abs path - offset)
-			val := string(v[:])
-			p := val[:strings.LastIndex(val, "-")]
+			var ci chunkInfo
+			_ = json.Unmarshal(v, &ci)
 			r["data"]["oldest-ts"] = time.Unix(0, btoi(k))
-			r["data"]["oldest-file"] = p
+			r["data"]["oldest-file"] = ci.Path
 		}
 		if k, v := c.Last(); k != nil {
-			// split to get (abs path - offset)
-			val := string(v[:])
-			p := val[:strings.LastIndex(val, "-")]
+			var ci chunkInfo
+			_ = json.Unmarshal(v, &ci)
 			r["data"]["newest-ts"] = time.Unix(0, btoi(k))
-			r["data"]["newest-file"] = p
+			r["data"]["newest-file"] = ci.Path
 		}

 		c = rootTsBucket.Cursor()
@@ -671,13 +695,17 @@ func (b *Persistent) Purge() {
 // GetChunkTs retrieves the current timestamp of this chunk
 func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
 	var t time.Time
-	tsVal := path + "-" + strconv.FormatInt(offset, 10)

 	err := b.db.View(func(tx *bolt.Tx) error {
 		tsBucket := tx.Bucket([]byte(DataTsBucket))
 		c := tsBucket.Cursor()
 		for k, v := c.First(); k != nil; k, v = c.Next() {
-			if bytes.Equal(v, []byte(tsVal)) {
+			var ci chunkInfo
+			err := json.Unmarshal(v, &ci)
+			if err != nil {
+				continue
+			}
+			if ci.Path == path && ci.Offset == offset {
 				t = time.Unix(0, btoi(k))
 				return nil
 			}
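The chunk bookkeeping above replaces the old `path-offset` strings with timestamp-keyed, JSON-encoded `chunkInfo` records, and `CleanChunksBySize` walks them oldest first until the total drops back under the limit. A bolt-free, in-memory sketch of that eviction calculation, for illustration only (the real code iterates a bolt cursor and removes the chunk files as it goes):

```go
package main

import (
	"fmt"
	"sort"
)

// chunkInfo mirrors the record stored in the dataTs bucket: which file the
// chunk belongs to, its offset and its size on disk.
type chunkInfo struct {
	Path   string
	Offset int64
	Size   int64
}

// evictOldest returns the chunks to delete so that the total size drops to
// maxSize or below, walking entries from oldest to newest timestamp.
func evictOldest(byTs map[int64]chunkInfo, maxSize int64) []chunkInfo {
	var total int64
	ts := make([]int64, 0, len(byTs))
	for t, ci := range byTs {
		total += ci.Size
		ts = append(ts, t)
	}
	if total <= maxSize {
		return nil
	}
	sort.Slice(ts, func(i, j int) bool { return ts[i] < ts[j] })

	needToClean := total - maxSize
	var victims []chunkInfo
	for _, t := range ts {
		ci := byTs[t]
		victims = append(victims, ci)
		needToClean -= ci.Size
		if needToClean <= 0 {
			break
		}
	}
	return victims
}

func main() {
	chunks := map[int64]chunkInfo{
		1: {Path: "a", Offset: 0, Size: 5 << 20},
		2: {Path: "a", Offset: 5 << 20, Size: 5 << 20},
		3: {Path: "b", Offset: 0, Size: 5 << 20},
	}
	for _, ci := range evictOldest(chunks, 8<<20) {
		fmt.Printf("delete %s@%d (%d bytes)\n", ci.Path, ci.Offset, ci.Size)
	}
}
```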
@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7

 package cachestats

@@ -1,6 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// +build plan9
+// +build plan9 !go1.7

 package cachestats
@@ -27,7 +27,7 @@ c) Copy remote
 s) Set configuration password
 q) Quit config
 n/r/c/s/q> n
-name> remote
+name> test-cache
 Type of storage to configure.
 Choose a number from below, or type in your own value
 ...
@@ -39,6 +39,19 @@ Remote to cache.
 Normally should contain a ':' and a path, eg "myremote:path/to/dir",
 "myremote:bucket" or maybe "myremote:" (not recommended).
 remote> local:/test
+Optional: The URL of the Plex server
+plex_url> http://127.0.0.1:32400
+Optional: The username of the Plex user
+plex_username> dummyusername
+Optional: The password of the Plex user
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank
+y/g/n> y
+Enter the password:
+password:
+Confirm the password:
+password:
 The size of a chunk. Lower value good for slow connections but can affect seamless reading.
 Default: 5M
 Choose a number from below, or type in your own value
@@ -60,36 +73,26 @@ Choose a number from below, or type in your own value
 3 / 24 hours
   \ "48h"
 info_age> 2
-How much time should a chunk (file data) be stored in cache.
-Accepted units are: "s", "m", "h".
-Default: 3h
+The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.
+Default: 10G
 Choose a number from below, or type in your own value
-1 / 30 seconds
-  \ "30s"
-2 / 1 minute
-  \ "1m"
-3 / 1 hour and 30 minutes
-  \ "1h30m"
-chunk_age> 3h
-How much time should data be cached during warm up.
-Accepted units are: "s", "m", "h".
-Default: 24h
-Choose a number from below, or type in your own value
-1 / 3 hours
-  \ "3h"
-2 / 6 hours
-  \ "6h"
-3 / 24 hours
-  \ "24h"
-warmup_age> 3
+1 / 500 MB
+  \ "500M"
+2 / 1 GB
+  \ "1G"
+3 / 10 GB
+  \ "10G"
+chunk_total_size> 3
 Remote config
 --------------------
 [test-cache]
 remote = local:/test
+plex_url = http://127.0.0.1:32400
+plex_username = dummyusername
+plex_password = *** ENCRYPTED ***
 chunk_size = 5M
-info_age = 24h
-chunk_age = 3h
-warmup_age = 24h
+info_age = 48h
+chunk_total_size = 10G
 ```

 You can then use it like this,
@@ -126,30 +129,31 @@ and cloud providers, the cache remote can split multiple requests to the
 cloud provider for smaller file chunks and combines them together locally
 where they can be available almost immediately before the reader usually
 needs them.

 This is similar to buffering when media files are played online. Rclone
 will stay around the current marker but always try its best to stay ahead
 and prepare the data before.

-#### Warmup mode ####
+#### Plex Integration ####

-A negative side of running multiple requests on the cloud provider is
-that you can easily reach a limit on how many requests or how much data
-you can download from a cloud provider in a window of time.
-For this reason, a warmup mode is a state where `cache` changes its settings
-to talk less with the cloud provider.
+There is a direct integration with Plex which allows cache to detect during reading
+if the file is in playback or not. This helps cache to adapt how it queries
+the cloud provider depending on what is needed.

-To prevent a ban or a similar action from the cloud provider, `cache` will
-keep track of all the open files and during intensive times when it passes
-a configured threshold, it will change its settings to a warmup mode.
+Scans will use a minimum number of workers (1) while during a confirmed playback
+`cache` will deploy the configured number of workers.

-It can also be disabled during single file streaming if `cache` sees that we're
-reading the file in sequence and can go load its parts in advance.
+This integration opens the doorway to additional performance improvements
+which will be explored in the near future.
+
+**Note:** If Plex options are not configured, `cache` will function with its
+configured options without adapting any of its settings.
+
+How to enable? Run `rclone config` and add all the Plex options (endpoint, username
+and password) in your remote and it will be automatically enabled.

 Affected settings:
-- `cache-chunk-no-memory`: _disabled_
-- `cache-workers`: _1_
-- file chunks will now be cached using `cache-warm-up-age` as a duration instead of the
-regular `cache-chunk-age`
+- `cache-workers`: _Configured value_ during confirmed playback or _1_ all the other times

 ### Known issues ###
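The adaptive behaviour described in the Plex Integration section comes down to a small decision: use the configured worker count only while playback is confirmed, otherwise fall back to a single worker. A hedged sketch of that decision, with `plexConfigured` and `isPlaying` standing in for the real connector state:

```go
package main

import "fmt"

// pickWorkers sketches the adaptive behaviour described above: deploy the
// configured worker count only while Plex confirms the file is being played,
// otherwise fall back to a single worker so library scans stay cheap.
func pickWorkers(configured int, plexConfigured, isPlaying bool) int {
	if !plexConfigured {
		return configured // no Plex options set: behave exactly as configured
	}
	if isPlaying {
		return configured
	}
	return 1
}

func main() {
	fmt.Println(pickWorkers(4, true, false))  // scan: 1
	fmt.Println(pickWorkers(4, true, true))   // confirmed playback: 4
	fmt.Println(pickWorkers(4, false, false)) // Plex not configured: 4
}
```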
@@ -194,6 +198,22 @@ connections.

 **Default**: 5M

+#### --cache-total-chunk-size=SIZE ####
+
+The total size that the chunks can take up on the local disk. If `cache`
+exceeds this value then it will start to delete the oldest chunks until
+it goes under this value.
+
+**Default**: 10G
+
+#### --cache-chunk-clean-interval=DURATION ####
+
+How often should `cache` perform cleanups of the chunk storage. The default value
+should be ok for most people. If you find that `cache` goes over `cache-total-chunk-size`
+too often then try to lower this value to force it to perform cleanups more often.
+
+**Default**: 1m
+
 #### --cache-info-age=DURATION ####

 How long to keep file structure information (directory listings, file size,
@@ -204,25 +224,6 @@ this value very large as the cache store will also be updated in real time.

 **Default**: 6h

-#### --cache-chunk-age=DURATION ####
-
-How long to keep file chunks (partial data) locally.
-
-Longer durations will result in larger cache stores as data will be cleared
-less often.
-
-**Default**: 3h
-
-#### --cache-warm-up-age=DURATION ####
-
-How long to keep file chunks (partial data) locally during warmup times.
-
-If `cache` goes through intensive read times when it is scanned for information
-then this setting will allow you to customize higher storage times for that
-data. Otherwise, it's safe to keep the same value as `cache-chunk-age`.
-
-**Default**: 3h
-
 #### --cache-read-retries=RETRIES ####

 How many times to retry a read from a cache storage.
@@ -235,7 +236,7 @@ able to provide file data anymore.
 For really slow connections, increase this to a point where the stream is
 able to provide data but your experience will be very stuttering.

-**Default**: 3
+**Default**: 10

 #### --cache-workers=WORKERS ####

@@ -247,6 +248,9 @@ This impacts several aspects like the cloud provider API limits, more stress
 on the hardware that rclone runs on but it also means that streams will
 be more fluid and data will be available much more faster to readers.

+**Note**: If the optional Plex integration is enabled then this setting
+will adapt to the type of reading performed and the value specified here will be used
+as a maximum number of workers to use.
 **Default**: 4

 #### --cache-chunk-no-memory ####
@@ -268,9 +272,7 @@ available on the local machine.

 #### --cache-rps=NUMBER ####

-Some of the rclone remotes that `cache` will wrap have back-off or limits
-in place to not reach cloud provider limits. This is similar to that.
-It places a hard limit on the number of requests per second that `cache`
+This setting places a hard limit on the number of requests per second that `cache`
 will be doing to the cloud provider remote and try to respect that value
 by setting waits between reads.

@@ -278,27 +280,13 @@ If you find that you're getting banned or limited on the cloud provider
 through cache and know that a smaller number of requests per second will
 allow you to work with it then you can use this setting for that.

-A good balance of all the other settings and warmup times should make this
+A good balance of all the other settings should make this
 setting useless but it is available to set for more special cases.

 **NOTE**: This will limit the number of requests during streams but other
 API calls to the cloud provider like directory listings will still pass.

-**Default**: 4
+**Default**: disabled

-#### --cache-warm-up-rps=RATE/SECONDS ####
-
-This setting allows `cache` to change its settings for warmup mode or revert
-back from it.
-
-`cache` keeps track of all open files and when there are `RATE` files open
-during `SECONDS` window of time reached it will activate warmup and change
-its settings as explained in the `Warmup mode` section.
-
-When the number of files being open goes under `RATE` in the same amount
-of time, `cache` will disable this mode.
-
-**Default**: 3/20
-
 #### --cache-writes ####
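`--cache-rps`, described above, caps how many opens per second `cache` issues to the wrapped remote by waiting between reads. A sketch of that idea using `golang.org/x/time/rate`; the rclone code has its own pacing, so the limiter here is only one way to express it, and a value of zero or less means the limit is disabled, matching the documented default:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// rateLimitedOpener spaces out opens of the wrapped remote: every open first
// waits until the shared limiter allows another request, so at most rps
// requests per second reach the cloud provider.
type rateLimitedOpener struct {
	limiter *rate.Limiter // nil means the limit is disabled
}

func newRateLimitedOpener(rps int) *rateLimitedOpener {
	o := &rateLimitedOpener{}
	if rps > 0 {
		o.limiter = rate.NewLimiter(rate.Limit(rps), 1)
	}
	return o
}

func (o *rateLimitedOpener) open(ctx context.Context, do func() error) error {
	if o.limiter != nil {
		if err := o.limiter.Wait(ctx); err != nil {
			return err
		}
	}
	return do()
}

func main() {
	opener := newRateLimitedOpener(2) // at most 2 opens per second
	start := time.Now()
	for i := 0; i < 4; i++ {
		_ = opener.open(context.Background(), func() error {
			fmt.Printf("open %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
			return nil
		})
	}
}
```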
@@ -166,6 +166,6 @@ func main() {
 	generateTestProgram(t, fns, "AzureBlob", buildConstraint("go1.7"))
 	generateTestProgram(t, fns, "Pcloud")
 	generateTestProgram(t, fns, "Webdav")
-	generateTestProgram(t, fns, "Cache", buildConstraint("!plan9"))
+	generateTestProgram(t, fns, "Cache", buildConstraint("!plan9,go1.7"))
 	log.Printf("Done")
 }