diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4ab7fac12..2f071716a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -178,7 +178,7 @@ with modules beneath.
   * mockdir - mocks an fs.Directory
   * mockobject - mocks an fs.Object
   * test_all - Runs integration tests for everything
-  * graphics - the images used in the website etc
+  * graphics - the images used in the website, etc.
   * lib - libraries used by the backend
   * atexit - register functions to run when rclone exits
   * dircache - directory ID to name caching
@@ -202,7 +202,7 @@ for the flag help, the remainder is shown to the user in `rclone config`
 and is added to the docs with `make backenddocs`.
 
 The only documentation you need to edit are the `docs/content/*.md`
-files. The MANUAL.*, rclone.1, web site etc are all auto generated
+files. The MANUAL.*, rclone.1, web site, etc. are all auto generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 00774c926..654da33c6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -37,7 +37,7 @@ Rclone uses the labels like this:
 * `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
 * `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
-* `maintenance` - internal enhancement, code re-organisation etc
+* `maintenance` - internal enhancement, code re-organisation, etc.
 * `Needs Go 1.XX` - waiting for that version of Go to be released
 * `question` - not a `bug` or `enhancement` - direct to the forum for next time
 * `Remote: XXX` - which rclone backend this affects
diff --git a/Makefile b/Makefile
index b453d2033..11a5b8f08 100644
--- a/Makefile
+++ b/Makefile
@@ -233,7 +233,7 @@ tag: retag doc
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
 	@echo git commit -m \"Version $(VERSION)\" -a -v
-	@echo "And finally run make retag before make cross etc"
+	@echo "And finally run make retag before make cross, etc."
 
 retag:
 	@echo "Version is $(VERSION)"
diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
index 7518dac8d..46cc02342 100644
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ b/backend/amazonclouddrive/amazonclouddrive.go
@@ -523,7 +523,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			}
 			entries = append(entries, o)
 		default:
-			// ignore ASSET etc
+			// ignore ASSET, etc.
 		}
 		return false
 	})
diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index 089d9c3b5..44b788ca8 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -109,7 +109,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
 			}},
 		}, {
 			Name: "info_age",
-			Help: `How long to cache file structure information (directory listings, file size, times etc). 
+			Help: `How long to cache file structure information (directory listings, file size, times, etc.). 
 If all write operations are done through the cache then you can safely make
 this value very large as the cache store will also be updated in real time.`,
 			Default: DefCacheInfoAge,
diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go
index 59470f2aa..ed01ecbdc 100644
--- a/backend/chunker/chunker.go
+++ b/backend/chunker/chunker.go
@@ -1222,7 +1222,7 @@ func (c *chunkingReader) accountBytes(bytesRead int64) {
 	}
 }
 
-// dummyRead updates accounting, hashsums etc by simulating reads
+// dummyRead updates accounting, hashsums, etc. by simulating reads
 func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	if c.hasher == nil && c.readCount+size > maxMetadataSize {
 		c.accountBytes(size)
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index b822c7608..570526e60 100755
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -284,8 +284,8 @@ Instructs rclone to operate on your "Shared with me" folder (where
 Google Drive lets you access the files and folders others have shared
 with you).
 
-This works both with the "list" (lsd, lsl, etc) and the "copy"
-commands (copy, sync, etc), and with all other commands too.`,
+This works both with the "list" (lsd, lsl, etc.) and the "copy"
+commands (copy, sync, etc.), and with all other commands too.`,
 			Advanced: true,
 		}, {
 			Name: "trashed_only",
diff --git a/backend/http/http.go b/backend/http/http.go
index 8b33688a7..884b9375d 100644
--- a/backend/http/http.go
+++ b/backend/http/http.go
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return errorReadOnly
 }
 
-// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 	return true
 }
diff --git a/backend/pcloud/api/types.go b/backend/pcloud/api/types.go
index 978d51049..8c0300b72 100644
--- a/backend/pcloud/api/types.go
+++ b/backend/pcloud/api/types.go
@@ -96,7 +96,7 @@ func (i *Item) ModTime() (t time.Time) {
 	return t
 }
 
-// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile etc methods
+// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile, etc. methods
 type ItemResult struct {
 	Error
 	Metadata Item `json:"metadata"`
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index da5d36feb..d12410954 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -219,7 +219,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	// Check if it is an api.Error
 	if apiErr, ok := err.(*api.Error); ok {
 		// See https://docs.pcloud.com/errors/ for error treatment
-		// Errors are classified as 1xxx, 2xxx etc
+		// Errors are classified as 1xxx, 2xxx, etc.
 		switch apiErr.Result / 1000 {
 		case 4: // 4xxx: rate limiting
 			doRetry = true
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index 286f9fd1e..bc7b55f40 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -1255,7 +1255,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return nil
 }
 
-// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 	return o.mode.IsRegular()
 }
diff --git a/backend/sharefile/upload.go b/backend/sharefile/upload.go
index 05906978d..87d760f52 100644
--- a/backend/sharefile/upload.go
+++ b/backend/sharefile/upload.go
@@ -32,7 +32,7 @@ type largeUpload struct {
 	wrap accounting.WrapFn // account parts being transferred
 	size int64 // total size
 	parts int64 // calculated number of parts, if known
-	info *api.UploadSpecification // where to post chunks etc
+	info *api.UploadSpecification // where to post chunks, etc.
 	threads int // number of threads to use in upload
 	streamed bool // set if using streamed upload
 }
diff --git a/bin/test_proxy.py b/bin/test_proxy.py
index 6f00ec757..1f3650fc8 100755
--- a/bin/test_proxy.py
+++ b/bin/test_proxy.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-A demo proxy for rclone serve sftp/webdav/ftp etc
+A demo proxy for rclone serve sftp/webdav/ftp, etc.
 
 This takes the incoming user/pass and converts it into an sftp backend
 running on localhost.
diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go
index 789a60229..b52a45741 100644
--- a/cmd/lsjson/lsjson.go
+++ b/cmd/lsjson/lsjson.go
@@ -93,7 +93,7 @@ number of decimal digits in the seconds will depend on the precision
 that the remote can hold the times, so if times are accurate to the
 nearest millisecond (e.g. Google Drive) then 3 digits will always be
 shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are
-accurate to the nearest second (Dropbox, Box, WebDav etc) no digits
+accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
 will be shown ("2017-05-31T16:15:57+01:00").
 
 The whole output can be processed as a JSON blob, or alternatively it
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index 9e1dfeaeb..312d3fd5d 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -279,7 +279,7 @@ for solutions to make ` + commandName + ` more reliable.
 ### Attribute caching
 
 You can use the flag --attr-timeout to set the time the kernel caches
-the attributes (size, modification time etc) for directory entries.
+the attributes (size, modification time, etc.) for directory entries.
 
 The default is "1s" which caches files just long enough to avoid
 too many callbacks to rclone from the kernel.
diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go
index 5406e0d8f..97781b066 100644
--- a/cmd/ncdu/ncdu.go
+++ b/cmd/ncdu/ncdu.go
@@ -785,7 +785,7 @@ outer:
 			}
 			}
 		}
-		// listen to key presses, etc
+		// listen to key presses, etc.
 	}
 	return nil
 }
diff --git a/docs/content/cache.md b/docs/content/cache.md
index f439ea8de..c2d6f7de9 100644
--- a/docs/content/cache.md
+++ b/docs/content/cache.md
@@ -77,7 +77,7 @@ Choose a number from below, or type in your own value
  3 / 10 MB
    \ "10M"
 chunk_size> 2
-How much time should object info (file size, file hashes etc) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache.
+How much time should object info (file size, file hashes, etc.) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache.
 Accepted units are: "s", "m", "h".
 Default: 5m
 Choose a number from below, or type in your own value
@@ -372,7 +372,7 @@ will need to be cleared or unexpected EOF errors will occur.
 
 #### --cache-info-age
 
-How long to cache file structure information (directory listings, file size, times etc). 
+How long to cache file structure information (directory listings, file size, times, etc.). 
 If all write operations are done through the cache then you can safely make
 this value very large as the cache store will also be updated in real time.
diff --git a/docs/content/changelog.md b/docs/content/changelog.md
index 88739d615..113786ed9 100644
--- a/docs/content/changelog.md
+++ b/docs/content/changelog.md
@@ -1261,7 +1261,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * Check for maximum length before decrypting filename to fix panic (Garry McNulty)
 * Azure Blob
     * Allow building azureblob backend on *BSD (themylogin)
-    * Use the rclone HTTP client to support `--dump headers`, `--tpslimit` etc (Nick Craig-Wood)
+    * Use the rclone HTTP client to support `--dump headers`, `--tpslimit`, etc. (Nick Craig-Wood)
     * Use the s3 pacer for 0 delay in non error conditions (Nick Craig-Wood)
     * Ignore directory markers (Nick Craig-Wood)
     * Stop Mkdir attempting to create existing containers (Nick Craig-Wood)
@@ -1496,7 +1496,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * Fix v2 auth on files with spaces (Nick Craig-Wood)
 * Union
     * Implement union backend which reads from multiple backends (Felix Brucker)
-    * Implement optional interfaces (Move, DirMove, Copy etc) (Nick Craig-Wood)
+    * Implement optional interfaces (Move, DirMove, Copy, etc.) (Nick Craig-Wood)
     * Fix ChangeNotify to support multiple remotes (Fabian Möller)
     * Fix `--backup-dir` on union backend (Nick Craig-Wood)
 * WebDAV
@@ -2476,7 +2476,7 @@ Point release to fix hubic and azureblob backends.
     * Log -v output to stdout by default
     * Display the transfer stats in more human readable form
     * Make 0 size files specifiable with `--max-size 0b`
-    * Add `b` suffix so we can specify bytes in --bwlimit, --min-size etc
+    * Add `b` suffix so we can specify bytes in --bwlimit, --min-size, etc.
     * Use "password:" instead of "password>" prompt - thanks Klaus Post and Leigh Klotz
 * Bug Fixes
     * Fix retry doing one too many retries
diff --git a/docs/content/chunker.md b/docs/content/chunker.md
index 9b815f726..2250808a6 100644
--- a/docs/content/chunker.md
+++ b/docs/content/chunker.md
@@ -105,7 +105,7 @@ When upload completes, temporary chunk files are finally renamed.
 This scheme guarantees that operations can be run in parallel and look
 from outside as atomic.
 A similar method with hidden temporary chunks is used for other operations
-(copy/move/rename etc). If an operation fails, hidden chunks are normally
+(copy/move/rename, etc.). If an operation fails, hidden chunks are normally
 destroyed, and the target composite file stays intact.
 
 When a composite file download is requested, chunker transparently
diff --git a/docs/content/docs.md b/docs/content/docs.md
index dc1061648..898794668 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -220,7 +220,7 @@ Here are some gotchas which may help users unfamiliar with the shell rules
 ### Linux / OSX ###
 
 If your names have spaces or shell metacharacters (e.g. `*`, `?`, `$`,
-`'`, `"` etc) then you must quote them. Use single quotes `'` by default.
+`'`, `"`, etc.) then you must quote them. Use single quotes `'` by default.
 
     rclone copy 'Important files?' remote:backup
 
diff --git a/docs/content/drive.md b/docs/content/drive.md
index df3524a05..f7aefbce0 100644
--- a/docs/content/drive.md
+++ b/docs/content/drive.md
@@ -737,8 +737,8 @@ Instructs rclone to operate on your "Shared with me" folder (where
 Google Drive lets you access the files and folders others have shared
 with you).
 
-This works both with the "list" (lsd, lsl, etc) and the "copy"
-commands (copy, sync, etc), and with all other commands too.
+This works both with the "list" (lsd, lsl, etc.) and the "copy"
+commands (copy, sync, etc.), and with all other commands too.
 
 - Config: shared_with_me
 - Env Var: RCLONE_DRIVE_SHARED_WITH_ME
diff --git a/docs/content/faq.md b/docs/content/faq.md
index 83de3074e..b6218ac4e 100644
--- a/docs/content/faq.md
+++ b/docs/content/faq.md
@@ -8,7 +8,7 @@ Frequently Asked Questions
 
 ### Do all cloud storage systems support all rclone commands ###
 
-Yes they do. All the rclone commands (e.g. `sync`, `copy` etc) will
+Yes they do. All the rclone commands (e.g. `sync`, `copy`, etc.) will
 work on all the remote storage systems.
 
 ### Can I copy the config from one machine to another ###
diff --git a/docs/content/flags.md b/docs/content/flags.md
index aab6eadf5..eb6abaa9a 100755
--- a/docs/content/flags.md
+++ b/docs/content/flags.md
@@ -214,7 +214,7 @@ and may be set in the config file.
       --cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
       --cache-db-purge Clear all the cached data for this remote on start.
       --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
-      --cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
+      --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.). (default 6h0m0s)
       --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
       --cache-plex-password string The password of the Plex user (obscured)
       --cache-plex-url string The URL of the Plex server
diff --git a/docs/content/remote_setup.md b/docs/content/remote_setup.md
index 3552ebe23..2cc9f3af7 100644
--- a/docs/content/remote_setup.md
+++ b/docs/content/remote_setup.md
@@ -89,6 +89,6 @@ Configuration file is stored at:
 /home/user/.rclone.conf
 ```
 
-Now transfer it to the remote box (scp, cut paste, ftp, sftp etc) and
+Now transfer it to the remote box (scp, cut paste, ftp, sftp, etc.) and
 place it in the correct place (use `rclone config file` on the remote
 box to find out where).
diff --git a/fs/fs.go b/fs/fs.go
index 0a3dd730c..6e416cdfe 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -509,7 +509,7 @@ type Features struct {
 	ReadMimeType bool // can read the mime type of objects
 	WriteMimeType bool // can set the mime type of objects
 	CanHaveEmptyDirectories bool // can have empty directories
-	BucketBased bool // is bucket based (like s3, swift etc)
+	BucketBased bool // is bucket based (like s3, swift, etc.)
 	BucketBasedRootOK bool // is bucket based and can use from root
 	SetTier bool // allows set tier functionality on objects
 	GetTier bool // allows to retrieve storage tier of objects
diff --git a/lib/rest/rest.go b/lib/rest/rest.go
index 7ea7b6505..e6e0e6850 100644
--- a/lib/rest/rest.go
+++ b/lib/rest/rest.go
@@ -124,9 +124,9 @@ func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
 	return api
 }
 
-// Opts contains parameters for Call, CallJSON etc
+// Opts contains parameters for Call, CallJSON, etc.
 type Opts struct {
-	Method string // GET, POST etc
+	Method string // GET, POST, etc.
 	Path string // relative to RootURL
 	RootURL string // override RootURL passed into SetRoot()
 	Body io.Reader
diff --git a/notes.txt b/notes.txt
index d467441e7..b6dbd1eaf 100644
--- a/notes.txt
+++ b/notes.txt
@@ -1,5 +1,5 @@
 Change lsd command so it doesn't show -1
- * Make sure all Fses show -1 for objects Zero for dates etc
+ * Make sure all Fses show -1 for objects Zero for dates, etc.
  * Make test?
 
 Put the TestRemote names into the Fs description
@@ -12,7 +12,7 @@ Todo
  * Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
  * FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
    creation in common code? Or try for as much as possible?
- * FIXME Account all the transactions (ls etc) using a different
+ * FIXME Account all the transactions (ls, etc.) using a different
    Roundtripper wrapper which wraps the transactions?
 
 Ideas
diff --git a/vfs/write.go b/vfs/write.go
index 3b2c98905..4f0590312 100644
--- a/vfs/write.go
+++ b/vfs/write.go
@@ -67,7 +67,7 @@ func (fh *WriteFileHandle) openPending() (err error) {
 	var pipeReader *io.PipeReader
 	pipeReader, fh.pipeWriter = io.Pipe()
 	go func() {
-		// NB Rcat deals with Stats.Transferring etc
+		// NB Rcat deals with Stats.Transferring, etc.
 		o, err := operations.Rcat(context.TODO(), fh.file.Fs(), fh.remote, pipeReader, time.Now())
 		if err != nil {
 			fs.Errorf(fh.remote, "WriteFileHandle.New Rcat failed: %v", err)