
docs: fix typos found by codespell in docs and code comments

Dimitri Papadopoulos Orfanos 2023-09-23 13:20:01 +02:00 committed by GitHub
parent 50b4a2398e
commit 3d473eb54e
16 changed files with 27 additions and 27 deletions


@@ -2119,7 +2119,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return currentChunkSize, err
}
- // Abort the multpart upload.
+ // Abort the multipart upload.
//
// FIXME it would be nice to delete uncommitted blocks.
//


@@ -154,7 +154,7 @@ func init() {
Default: "",
Help: `Impersonate this user ID when using a service account.
- Settng this flag allows rclone, when using a JWT service account, to
+ Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
@@ -1212,7 +1212,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
// box can send duplicate Event IDs. Use this map to track and filter
- // the ones we've alread processed.
+ // the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker


@@ -206,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
- // cache *mega.Mega on username so we can re-use and share
+ // cache *mega.Mega on username so we can reuse and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing
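The caching pattern that comment describes might be sketched like this (a hypothetical illustration, not the backend's actual code; it assumes the go-mega library and omits login and error handling):

```go
// Package megashare sketches sharing one *mega.Mega per username.
package megashare

import (
	"sync"

	"github.com/t3rm1n4l/go-mega"
)

var (
	clientMu sync.Mutex
	clients  = map[string]*mega.Mega{} // one shared client per username
)

// getClient returns the cached client for user, creating it on first use,
// so every remote for that user shares a single (expensive) object tree.
func getClient(user string) *mega.Mega {
	clientMu.Lock()
	defer clientMu.Unlock()
	if c, ok := clients[user]; ok {
		return c
	}
	c := mega.New()
	clients[user] = c
	return c
}
```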


@@ -1215,7 +1215,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
return nil, fmt.Errorf("failed to upload: %w", err)
}
// refresh uploaded file info
- // Compared to `newfile.File` this upgrades several feilds...
+ // Compared to `newfile.File` this upgrades several fields...
// audit, links, modified_time, phase, revision, and web_content_link
return f.getFile(ctx, newfile.File.ID)
}


@@ -5611,7 +5611,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return currentChunkSize, err
}
- // Abort the multpart upload
+ // Abort the multipart upload
func (w *s3ChunkWriter) Abort(ctx context.Context) error {
err := w.f.pacer.Call(func() (bool, error) {
_, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{


@@ -1014,7 +1014,7 @@ func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []st
// save it so on reconnection we give back the previous string.
// This removes the ability to let the user correct a mistaken entry,
// but means that reconnects are transparent.
- // We'll re-use config.Pass for this, 'cos we know it's not been
+ // We'll reuse config.Pass for this, 'cos we know it's not been
// specified.
func (f *Fs) getPass() (string, error) {
for f.savedpswd == "" {
@@ -1602,7 +1602,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
fs.Debugf(f, "About path %q", aboutPath)
vfsStats, err = c.sftpClient.StatVFS(aboutPath)
}
- f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be re-used
+ f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be reused
if vfsStats != nil {
total := vfsStats.TotalSpace()
free := vfsStats.FreeSpace()
@@ -2044,7 +2044,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return fmt.Errorf("Update: %w", err)
}
- // Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
+ // Hang on to the connection for the whole upload so it doesn't get reused while we are uploading
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
o.fs.putSftpConnection(&c, err)


@@ -1,6 +1,6 @@
// Package cmd implements the rclone command
//
- // It is in a sub package so it's internals can be re-used elsewhere
+ // It is in a sub package so it's internals can be reused elsewhere
package cmd
// FIXME only attach the remote flags when using a remote???


@@ -83,7 +83,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
// (128 kiB on Linux) and cannot be larger than MaxWrite.
//
// MaxReadAhead only affects buffered reads (=non-direct-io), but even then, the
- // kernel can and does send larger reads to satisfy read reqests from applications
+ // kernel can and does send larger reads to satisfy read requests from applications
// (up to MaxWrite or VM_READAHEAD_PAGES=128 kiB, whichever is less).
MaxReadAhead int
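A minimal sketch of honouring that constraint when filling in the mount options (assuming the hanwen/go-fuse/v2 API; the sizes are illustrative, not rclone's defaults):

```go
package main

import (
	"fmt"

	"github.com/hanwen/go-fuse/v2/fuse"
)

// buildOpts clamps read-ahead to MaxWrite, since the kernel never issues
// reads larger than MaxWrite anyway.
func buildOpts() fuse.MountOptions {
	opts := fuse.MountOptions{MaxWrite: 1 << 20} // 1 MiB
	opts.MaxReadAhead = 128 * 1024               // 128 kiB, the usual kernel default
	if opts.MaxReadAhead > opts.MaxWrite {
		opts.MaxReadAhead = opts.MaxWrite
	}
	return opts
}

func main() {
	opts := buildOpts()
	fmt.Println("MaxReadAhead:", opts.MaxReadAhead, "MaxWrite:", opts.MaxWrite)
}
```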


@@ -442,7 +442,7 @@ Properties:
Impersonate this user ID when using a service account.
- Settng this flag allows rclone, when using a JWT service account, to
+ Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
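For illustration, a hypothetical rclone.conf section using this option (the remote name and user ID are placeholders):

```
[mybox]
type = box
impersonate = 12345678
```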


@@ -105,14 +105,14 @@ description: "Rclone Changelog"
* Fix 425 "TLS session of data connection not resumed" errors (Nick Craig-Wood)
* Hdfs
* Retry "replication in progress" errors when uploading (Nick Craig-Wood)
- * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood)
+ * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
* HTTP
* CORS should not be sent if not set (yuudi)
* Fix webdav OPTIONS response (yuudi)
* Opendrive
* Fix List on a just deleted and remade directory (Nick Craig-Wood)
* Oracleobjectstorage
- * Use rclone's rate limiter in mutipart transfers (Manoj Ghosh)
+ * Use rclone's rate limiter in multipart transfers (Manoj Ghosh)
* Implement `OpenChunkWriter` and multi-thread uploads (Manoj Ghosh)
* S3
* Refactor multipart upload to use `OpenChunkWriter` and `ChunkWriter` (Vitor Gomes)
@@ -285,14 +285,14 @@ description: "Rclone Changelog"
* Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
* Report any list errors during `rclone cleanup` (albertony)
* Putio
- * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood)
+ * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
* Fix modification times not being preserved for server side copy and move (Nick Craig-Wood)
* Fix server side copy failures (400 errors) (Nick Craig-Wood)
* S3
* Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
* Update Scaleway storage classes (Brian Starkey)
* Fix `--s3-versions` on individual objects (Nick Craig-Wood)
- * Fix hang on aborting multpart upload with iDrive e2 (Nick Craig-Wood)
+ * Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood)
* Fix missing "tier" metadata (Nick Craig-Wood)
* Fix V3sign: add missing subresource delete (cc)
* Fix Arvancloud Domain and region changes and alphabetise the provider (Ehsan Tadayon)
@@ -309,7 +309,7 @@ description: "Rclone Changelog"
* Code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) (albertony)
* Storj
* Fix "uplink: too many requests" errors when uploading to the same file (Nick Craig-Wood)
- * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood)
+ * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
* Swift
* Ignore 404 error when deleting an object (Nick Craig-Wood)
* Union
@@ -3938,7 +3938,7 @@ Point release to fix hubic and azureblob backends.
* Revert to copy when moving file across file system boundaries
* `--skip-links` to suppress symlink warnings (thanks Zhiming Wang)
* Mount
- * Re-use `rcat` internals to support uploads from all remotes
+ * Reuse `rcat` internals to support uploads from all remotes
* Dropbox
* Fix "entry doesn't belong in directory" error
* Stop using deprecated API methods


@@ -712,7 +712,7 @@ has a header and is divided into chunks.
The initial nonce is generated from the operating system's crypto
strong random number generator. The nonce is incremented for each
chunk read making sure each nonce is unique for each block written.
- The chance of a nonce being re-used is minuscule. If you wrote an
+ The chance of a nonce being reused is minuscule. If you wrote an
exabyte of data (10¹⁸ bytes) you would have a probability of
approximately 2×10⁻³² of re-using a nonce.
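As a rough sanity check of that figure (a back-of-the-envelope sketch assuming crypt's 24-byte, i.e. 192-bit, secretbox nonce and 64 KiB chunks), the birthday bound gives:

```
n ≈ 10^18 bytes / 64 KiB ≈ 1.5 × 10^13 chunks
p ≈ n² / (2 × 2^192) ≈ 2.3 × 10^26 / 1.3 × 10^58 ≈ 2 × 10^-32
```

which matches the probability quoted above.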


@@ -315,7 +315,7 @@ Make sure you have [Snapd installed](https://snapcraft.io/docs/installing-snapd)
```bash
$ sudo snap install rclone
```
- Due to the strict confinement of Snap, rclone snap cannot acess real /home/$USER/.config/rclone directory, default config path is as below.
+ Due to the strict confinement of Snap, rclone snap cannot access real /home/$USER/.config/rclone directory, default config path is as below.
- Default config directory:
- /home/$USER/snap/rclone/current/.config/rclone
@@ -575,7 +575,7 @@ It requires .NET Framework, but it is preinstalled on newer versions of Windows,
also provides alternative standalone distributions which include the necessary runtime (.NET 5).
WinSW is a command-line only utility, where you have to manually create an XML file with
service configuration. This may be a drawback for some, but it can also be an advantage
- as it is easy to back up and re-use the configuration
+ as it is easy to back up and reuse the configuration
settings, without having to go through manual steps in a GUI. One thing to note is that
by default it does not restart the service on error; one has to explicitly enable this
in the configuration file (via the "onfailure" parameter).
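For illustration, a hypothetical WinSW service definition along those lines (the id, paths and remote name are placeholders; note the explicit "onfailure" setting):

```xml
<service>
  <id>rclone-mount</id>
  <name>rclone mount</name>
  <description>Mounts a remote as a drive letter at boot.</description>
  <executable>C:\rclone\rclone.exe</executable>
  <arguments>mount remote: X: --config C:\rclone\rclone.conf</arguments>
  <onfailure action="restart" />
</service>
```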


@@ -209,7 +209,7 @@ rclone mount \
# its exact meaning will depend on the backend. For HTTP based backends it is an HTTP PUT/GET/POST/etc and its response
--cache-dir /tmp/rclone/cache # Directory rclone will use for caching.
--dir-cache-time 5m \ # Time to cache directory entries for (default 5m0s)
- --vfs-cache-mode writes \ # Cache mode off|minimal|writes|full (default off), writes gives the maximum compatiblity like a local disk
+ --vfs-cache-mode writes \ # Cache mode off|minimal|writes|full (default off), writes gives the maximum compatibility like a local disk
--vfs-cache-max-age 20m \ # Max age of objects in the cache (default 1h0m0s)
--vfs-cache-max-size 10G \ # Max total size of objects in the cache (default off)
--vfs-cache-poll-interval 1m \ # Interval to poll the cache for stale objects (default 1m0s)
@@ -372,7 +372,7 @@ Install NFS Utils
sudo yum install -y nfs-utils
```
- Export the desired directory via NFS Server in the same machine where rclone has mounted to, ensure NFS serivce has
+ Export the desired directory via NFS Server in the same machine where rclone has mounted to, ensure NFS service has
desired permissions to read the directory. If it runs as root, then it will have permissions for sure, but if it runs
as a separate user then ensure that user has the necessary privileges.
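For illustration, a hypothetical /etc/exports entry for this setup (the path and options are placeholders; an explicit fsid is commonly needed when exporting a FUSE mount):

```
# /etc/exports (apply with: sudo exportfs -ra)
/mnt/rclone *(rw,sync,no_subtree_check,fsid=1)
```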
```shell


@@ -152,7 +152,7 @@ func TestMemoryObject(t *testing.T) {
err = o.Update(context.Background(), newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
- assert.Equal(t, "Rutaba", string(content)) // check we didn't re-use the buffer
+ assert.Equal(t, "Rutaba", string(content)) // check we didn't reuse the buffer
// now try streaming
newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


@@ -318,7 +318,7 @@ func TestRcSetTier(t *testing.T) {
r.CheckRemoteItems(t, file1)
// Because we don't know what the current tier options here are, let's
- // just get the current tier, and re-use that
+ // just get the current tier, and reuse that
o, err := r.Fremote.NewObject(ctx, file1.Path)
require.NoError(t, err)
trr, ok := o.(fs.GetTierer)
@@ -345,7 +345,7 @@ func TestRcSetTierFile(t *testing.T) {
r.CheckRemoteItems(t, file1)
// Because we don't know what the current tier options here are, let's
- // just get the current tier, and re-use that
+ // just get the current tier, and reuse that
o, err := r.Fremote.NewObject(ctx, file1.Path)
require.NoError(t, err)
trr, ok := o.(fs.GetTierer)


@@ -345,7 +345,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
start, offset := dl.getRange()
// The downloader's offset to offset+window is the gap
- // in which we would like to re-use this
+ // in which we would like to reuse this
// downloader. The downloader will never reach before
// start and offset+window is too far away - we'd
// rather start another downloader.
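The window test described in that comment reduces to something like this (a hypothetical illustration, not rclone's actual code):

```go
package main

import "fmt"

// canReuse reports whether a read starting at reqStart can be served by a
// downloader currently at offset: only requests inside [offset, offset+window)
// are worth waiting for; anything else warrants a new downloader.
func canReuse(offset, window, reqStart int64) bool {
	return reqStart >= offset && reqStart < offset+window
}

func main() {
	fmt.Println(canReuse(1024, 4096, 2048)) // true: inside the window
	fmt.Println(canReuse(1024, 4096, 9000)) // false: too far ahead
}
```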