From ce3b65e6dc0866cac041b82aabbf790ebd9778b6 Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Sat, 13 Aug 2022 22:56:32 -0400 Subject: [PATCH] all: fix spelling across the project MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * abcdefghijklmnopqrstuvwxyz * accounting * additional * allowed * almost * already * appropriately * arise * bandwidth * behave * bidirectional * brackets * cached * characters * cloud * committing * concatenating * configured * constructs * current * cutoff * deferred * different * directory * disposition * dropbox * either way * error * excess * experiments * explicitly * externally * files * github * gzipped * hierarchies * huffman * hyphen * implicitly * independent * insensitive * integrity * libraries * literally * metadata * mimics * missing * modification * multipart * multiple * nightmare * nonexistent * number * obscure * ourselves * overridden * potatoes * preexisting * priority * received * remote * replacement * represents * reproducibility * response * satisfies * sensitive * separately * separator * specifying * string * successful * synchronization * syncing * šenfeld * take * temporarily * testcontents * that * the * themselves * throttling * timeout * transaction * transferred * unnecessary * using * webbrowser * which * with * workspace Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com> --- CONTRIBUTING.md | 4 +-- backend/b2/api/types.go | 2 +- backend/box/api/types.go | 2 +- backend/chunker/chunker.go | 4 +-- backend/combine/combine.go | 2 +- backend/compress/compress.go | 10 +++---- backend/crypt/crypt.go | 2 +- backend/drive/drive.go | 2 +- backend/dropbox/batcher.go | 2 +- backend/dropbox/dropbox.go | 4 +-- backend/fichier/structs.go | 2 +- backend/filefabric/api/types.go | 4 +-- backend/filefabric/filefabric.go | 2 +- .../googlecloudstorage/googlecloudstorage.go | 2 +- backend/hidrive/hidrive.go | 6 ++-- 
.../hidrive/hidrivehash/hidrivehash_test.go | 2 +- backend/internetarchive/internetarchive.go | 6 ++-- backend/jottacloud/jottacloud.go | 2 +- backend/koofr/koofr.go | 2 +- backend/local/local_internal_test.go | 2 +- backend/local/setbtime.go | 2 +- backend/local/setbtime_windows.go | 2 +- backend/mega/mega.go | 4 +-- backend/netstorage/netstorage.go | 6 ++-- backend/onedrive/api/types.go | 2 +- backend/pcloud/api/types.go | 2 +- backend/s3/s3.go | 4 +-- backend/s3/s3_internal_test.go | 4 +-- backend/swift/swift_internal_test.go | 4 +-- backend/uptobox/api/types.go | 2 +- backend/uptobox/uptobox.go | 8 +++--- backend/zoho/api/types.go | 2 +- backend/zoho/zoho.go | 6 ++-- cmd/bisync/deltas.go | 2 +- cmd/bisync/help.go | 2 +- cmd/cmount/mountpoint_windows.go | 2 +- cmd/config/config.go | 2 +- cmd/hashsum/hashsum.go | 2 +- cmd/help.go | 2 +- cmd/ls/lshelp/lshelp.go | 2 +- cmd/lsjson/lsjson.go | 2 +- cmd/md5sum/md5sum.go | 2 +- cmd/mountlib/help.go | 8 +++--- cmd/mountlib/mount.go | 2 +- cmd/mountlib/utils.go | 4 +-- cmd/ncdu/ncdu.go | 2 +- cmd/serve/docker/unix.go | 2 +- cmd/serve/ftp/ftp.go | 2 +- cmd/sha1sum/sha1sum.go | 2 +- cmd/touch/touch.go | 4 +-- cmdtest/environment_test.go | 2 +- docs/content/bisync.md | 8 +++--- docs/content/changelog.md | 28 +++++++++---------- docs/content/compress.md | 2 +- docs/content/crypt.md | 6 ++-- docs/content/docs.md | 6 ++-- docs/content/drive.md | 2 +- docs/content/dropbox.md | 2 +- docs/content/filefabric.md | 2 +- docs/content/flags.md | 2 +- docs/content/ftp.md | 2 +- docs/content/googlecloudstorage.md | 2 +- docs/content/hidrive.md | 2 +- docs/content/install.md | 4 +-- docs/content/jottacloud.md | 2 +- docs/content/mega.md | 2 +- docs/content/netstorage.md | 2 +- docs/content/onedrive.md | 2 +- docs/content/overview.md | 2 +- docs/content/rc.md | 4 +-- docs/content/s3.md | 2 +- docs/content/sftp.md | 8 +++--- fs/accounting/stats.go | 2 +- fs/cache/cache_test.go | 2 +- fs/config/configfile/configfile.go | 2 +- 
fs/config/configmap/configmap.go | 2 +- fs/dirtree/dirtree.go | 2 +- fs/newfs.go | 2 +- fs/operations/check_test.go | 4 +-- fs/operations/lsjson_test.go | 2 +- fs/operations/operations.go | 2 +- fs/rc/webgui/plugins.go | 4 +-- fs/rc/webgui/rc.go | 2 +- fs/registry.go | 4 +-- fstest/fstests/fstests.go | 10 +++---- fstest/run.go | 2 +- lib/cache/cache_test.go | 6 ++-- lib/dircache/dircache.go | 2 +- lib/jwtutil/jwtutil.go | 4 +-- lib/rest/rest.go | 2 +- librclone/librclone/librclone.go | 2 +- vfs/dir.go | 2 +- vfs/vfscache/cache_test.go | 4 +-- vfs/vfscache/item.go | 2 +- vfs/vfscache/writeback/writeback_test.go | 4 +-- 95 files changed, 160 insertions(+), 160 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f299a4691..6f8ea2afa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,7 +77,7 @@ Make sure you * Add [documentation](#writing-documentation) for a new feature. * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages). -When you are done with that push your changes to Github: +When you are done with that push your changes to GitHub: git push -u origin my-new-feature @@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits). 
-## Using Git and Github ## +## Using Git and GitHub ## ### Committing your changes ### diff --git a/backend/b2/api/types.go b/backend/b2/api/types.go index 5089db242..4d981d950 100644 --- a/backend/b2/api/types.go +++ b/backend/b2/api/types.go @@ -239,7 +239,7 @@ type GetFileInfoRequest struct { // If the original source of the file being uploaded has a last // modified time concept, Backblaze recommends using // src_last_modified_millis as the name, and a string holding the base -// 10 number number of milliseconds since midnight, January 1, 1970 +// 10 number of milliseconds since midnight, January 1, 1970 // UTC. This fits in a 64 bit integer such as the type "long" in the // programming language Java. It is intended to be compatible with // Java's time long. For example, it can be passed directly into the diff --git a/backend/box/api/types.go b/backend/box/api/types.go index d3549f6aa..6ff87761c 100644 --- a/backend/box/api/types.go +++ b/backend/box/api/types.go @@ -14,7 +14,7 @@ const ( timeFormat = `"` + time.RFC3339 + `"` ) -// Time represents represents date and time information for the +// Time represents date and time information for the // box API, by using RFC3339 type Time time.Time diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go index 4ed8ec9e8..17da9393c 100644 --- a/backend/chunker/chunker.go +++ b/backend/chunker/chunker.go @@ -64,7 +64,7 @@ import ( // length of 13 decimals it makes a 7-digit base-36 number. // // When transactions is set to the norename style, data chunks will -// keep their temporary chunk names (with the transacion identifier +// keep their temporary chunk names (with the transaction identifier // suffix). To distinguish them from temporary chunks, the txn field // of the metadata file is set to match the transaction identifier of // the data chunks. 
@@ -1079,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error { // readXactID returns the transaction ID stored in the passed metadata object func (o *Object) readXactID(ctx context.Context) (xactID string, err error) { - // if xactID has already been read and cahced return it now + // if xactID has already been read and cached return it now if o.xIDCached { return o.xactID, nil } diff --git a/backend/combine/combine.go b/backend/combine/combine.go index dab80a82b..34644a968 100644 --- a/backend/combine/combine.go +++ b/backend/combine/combine.go @@ -1,4 +1,4 @@ -// Package combine implents a backend to combine multipe remotes in a directory tree +// Package combine implements a backend to combine multiple remotes in a directory tree package combine /* diff --git a/backend/compress/compress.go b/backend/compress/compress.go index b53e725b8..2ae192eb9 100644 --- a/backend/compress/compress.go +++ b/backend/compress/compress.go @@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended. Levels 1 to 9 increase compression at the cost of speed. Going past 6 generally offers very little return. -Level -2 uses Huffmann encoding only. Only use if you know what you +Level -2 uses Huffman encoding only. Only use if you know what you are doing. Level 0 turns off compression.`, Default: sgzip.DefaultCompression, @@ -130,7 +130,7 @@ type Fs struct { features *fs.Features // optional features } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) @@ -451,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...) 
} - // Need to include what we allready read + // Need to include what we already read in = &ReadCloserWrapper{ Reader: io.MultiReader(bytes.NewReader(buf), in), Closer: in, @@ -731,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt } // If our new object is compressed we have to rename it with the correct size. - // Uncompressed objects don't store the size in the name so we they'll allready have the correct name. + // Uncompressed objects don't store the size in the name so they'll already have the correct name. if compressible { wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object) if err != nil { @@ -742,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt return newObj, nil } -// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects +// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects // will break stuff. Right no I can't think of a way to make this work. // PutUnchecked uploads the object diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index bae2c9799..85972298e 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -125,7 +125,7 @@ names, or for debugging purposes.`, This option could help with shortening the encrypted filename. The suitable option would depend on the way your remote count the filename -length and if it's case sensitve.`, +length and if it's case sensitive.`, Default: "base32", Examples: []fs.OptionExample{ { diff --git a/backend/drive/drive.go b/backend/drive/drive.go index b4f310eaf..30c165b61 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -3305,7 +3305,7 @@ drives found and a combined drive. 
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:" Adding this to the rclone config file will cause those team drives to -be accessible with the aliases shown. Any illegal charactes will be +be accessible with the aliases shown. Any illegal characters will be substituted with "_" and duplicate names will have numbers suffixed. It will also add a remote called AllDrives which shows all the shared drives combined into one directory tree. diff --git a/backend/dropbox/batcher.go b/backend/dropbox/batcher.go index 874bb93d9..82a4b5524 100644 --- a/backend/dropbox/batcher.go +++ b/backend/dropbox/batcher.go @@ -309,7 +309,7 @@ func (b *batcher) Shutdown() { } b.shutOnce.Do(func() { atexit.Unregister(b.atexit) - fs.Infof(b.f, "Commiting uploads - please wait...") + fs.Infof(b.f, "Committing uploads - please wait...") // show that batcher is shutting down close(b.closed) // quit the commitLoop by sending a quitRequest message diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 42686394d..740bd76e3 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -268,7 +268,7 @@ default based on the batch_mode in use. 
Advanced: true, }, { Name: "batch_commit_timeout", - Help: `Max time to wait for a batch to finish comitting`, + Help: `Max time to wait for a batch to finish committing`, Default: fs.Duration(10 * time.Minute), Advanced: true, }, { @@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset delta := int64(correctOffset) - int64(cursor.Offset) skip += delta - what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip) + what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip) if skip < 0 { return false, fmt.Errorf("can't seek backwards to correct offset: %s", what) } else if skip == chunkSize { diff --git a/backend/fichier/structs.go b/backend/fichier/structs.go index 89673f7f3..02e50a632 100644 --- a/backend/fichier/structs.go +++ b/backend/fichier/structs.go @@ -84,7 +84,7 @@ type CopyFileResponse struct { URLs []FileCopy `json:"urls"` } -// FileCopy is used in the the CopyFileResponse +// FileCopy is used in the CopyFileResponse type FileCopy struct { FromURL string `json:"from_url"` ToURL string `json:"to_url"` diff --git a/backend/filefabric/api/types.go b/backend/filefabric/api/types.go index 926ad82a6..9f4fb5984 100644 --- a/backend/filefabric/api/types.go +++ b/backend/filefabric/api/types.go @@ -19,7 +19,7 @@ const ( timeFormatJSON = `"` + timeFormatParameters + `"` ) -// Time represents represents date and time information for the +// Time represents date and time information for the // filefabric API type Time time.Time @@ -95,7 +95,7 @@ type Status struct { // Warning string `json:"warning"` // obsolete } -// Status statisfies the error interface +// Status satisfies the error interface func (e *Status) Error() string { return fmt.Sprintf("%s (%s)", e.Message, e.Code) } diff --git a/backend/filefabric/filefabric.go 
b/backend/filefabric/filefabric.go index ecfedbd0b..7442814c5 100644 --- a/backend/filefabric/filefabric.go +++ b/backend/filefabric/filefabric.go @@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) } -// Wait for the the background task to complete if necessary +// Wait for the background task to complete if necessary func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) { if taskID == "" || taskID == "0" { // No task to wait for diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index 5ce96e672..dc233bfcd 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -311,7 +311,7 @@ rclone does if you know the bucket exists already. Help: `If set this will decompress gzip encoded objects. It is possible to upload objects to GCS with "Content-Encoding: gzip" -set. Normally rclone will download these files files as compressed objects. +set. Normally rclone will download these files as compressed objects. If this flag is set then rclone will decompress these files with "Content-Encoding: gzip" as they are received. This means that rclone diff --git a/backend/hidrive/hidrive.go b/backend/hidrive/hidrive.go index b0f56844a..5004b8191 100644 --- a/backend/hidrive/hidrive.go +++ b/backend/hidrive/hidrive.go @@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction) } - // Do not allow the root-prefix to be non-existent nor a directory, + // Do not allow the root-prefix to be nonexistent nor a directory, // but it can be empty. 
if f.opt.RootPrefix != "" { item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields) @@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error { // should be retried after the parent-directories of the destination have been created. // If so, it will create the parent-directories. // -// If any errors arrise while finding the source or +// If any errors arise while finding the source or // creating the parent-directory those will be returned. // Otherwise returns the originalError. func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) { @@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op } else { _, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency) } - // Try to check if object was updated, eitherway. + // Try to check if object was updated, either way. // Metadata should be updated even if the upload fails. info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields) } else { diff --git a/backend/hidrive/hidrivehash/hidrivehash_test.go b/backend/hidrive/hidrivehash/hidrivehash_test.go index d27970c34..07f2435b4 100644 --- a/backend/hidrive/hidrivehash/hidrivehash_test.go +++ b/backend/hidrive/hidrivehash/hidrivehash_test.go @@ -138,7 +138,7 @@ var testTable = []struct { // pattern describes how to use data to construct the hash-input. // For every entry n at even indices this repeats the data n times. // For every entry m at odd indices this repeats a null-byte m times. - // The input-data is constructed by concatinating the results in order. + // The input-data is constructed by concatenating the results in order. 
pattern []int64 out []byte name string diff --git a/backend/internetarchive/internetarchive.go b/backend/internetarchive/internetarchive.go index b3d4a55cb..4c1dc7f82 100644 --- a/backend/internetarchive/internetarchive.go +++ b/backend/internetarchive/internetarchive.go @@ -227,7 +227,7 @@ type Object struct { rawData json.RawMessage } -// IAFile reprensents a subset of object in MetadataResponse.Files +// IAFile represents a subset of object in MetadataResponse.Files type IAFile struct { Name string `json:"name"` // Source string `json:"source"` @@ -243,7 +243,7 @@ type IAFile struct { rawData json.RawMessage } -// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/ +// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/ type MetadataResponse struct { Files []IAFile `json:"files"` ItemSize int64 `json:"item_size"` @@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string { return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/")) } -// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape +// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape func quotePath(s string) string { seg := strings.Split(s, "/") newValues := []string{} diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 001787451..45e8489ea 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -1418,7 +1418,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, } info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote) - // if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?) + // if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?) 
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" { fs.Debugf(src, "Server-side copied to trashed destination, restoring") info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5) diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go index 642ab79fe..54513f0c1 100644 --- a/backend/koofr/koofr.go +++ b/backend/koofr/koofr.go @@ -668,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, // // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F // - // I am not sure about meaning of "path" parameter; in my expriments + // I am not sure about meaning of "path" parameter; in my experiments // it is always "%2F", and omitting it or putting any other value // results in 404. // diff --git a/backend/local/local_internal_test.go b/backend/local/local_internal_test.go index 698d263a8..811831d91 100644 --- a/backend/local/local_internal_test.go +++ b/backend/local/local_internal_test.go @@ -192,7 +192,7 @@ func TestHashOnUpdate(t *testing.T) { require.NoError(t, err) assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5) - // Reupload it with diferent contents but same size and timestamp + // Reupload it with different contents but same size and timestamp var b = bytes.NewBufferString("CONTENT") src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f) err = o.Update(ctx, b, src) diff --git a/backend/local/setbtime.go b/backend/local/setbtime.go index 20a914bf2..5d2c462ee 100644 --- a/backend/local/setbtime.go +++ b/backend/local/setbtime.go @@ -9,7 +9,7 @@ import ( const haveSetBTime = false -// setBTime changes the the birth time of the file passed in +// setBTime changes the birth time of the file passed in func setBTime(name string, btime time.Time) error { // Does nothing return nil diff --git a/backend/local/setbtime_windows.go b/backend/local/setbtime_windows.go index 2b8fd98d5..adb9efa3a 100644 --- 
a/backend/local/setbtime_windows.go +++ b/backend/local/setbtime_windows.go @@ -11,7 +11,7 @@ import ( const haveSetBTime = true -// setBTime sets the the birth time of the file passed in +// setBTime sets the birth time of the file passed in func setBTime(name string, btime time.Time) (err error) { h, err := syscall.Open(name, os.O_RDWR, 0755) if err != nil { diff --git a/backend/mega/mega.go b/backend/mega/mega.go index bb038506b..92176c795 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node * } } if err != nil { - return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err) + return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err) } // i is number of directories to create (may be 0) // node is directory to create them from @@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) { return f._rootNode, nil } - // Check for pre-existing root + // Check for preexisting root absRoot := f.srv.FS.GetRoot() node, err := f.findDir(absRoot, f.root) //log.Printf("findRoot findDir %p %v", node, err) diff --git a/backend/netstorage/netstorage.go b/backend/netstorage/netstorage.go index 313679e27..31adcbe7a 100755 --- a/backend/netstorage/netstorage.go +++ b/backend/netstorage/netstorage.go @@ -118,7 +118,7 @@ type Fs struct { filetype string // dir, file or symlink dirscreated map[string]bool // if implicit dir has been created already dirscreatedMutex sync.Mutex // mutex to protect dirscreated - statcache map[string][]File // cache successfull stat requests + statcache map[string][]File // cache successful stat requests statcacheMutex sync.RWMutex // RWMutex to protect statcache } @@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.filetype == "" { // This 
happens in two scenarios. - // 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs. + // 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs. // 2. List/ListR is called from the context of test_all and not the regular rclone binary. err := f.initFs(ctx, dir) if err != nil { @@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.filetype == "" { // This happens in two scenarios. - // 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs. + // 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs. // 2. List/ListR is called from the context of test_all and not the regular rclone binary. err := f.initFs(ctx, dir) if err != nil { diff --git a/backend/onedrive/api/types.go b/backend/onedrive/api/types.go index 9d5cad6ba..ca5626858 100644 --- a/backend/onedrive/api/types.go +++ b/backend/onedrive/api/types.go @@ -70,7 +70,7 @@ type Drive struct { Quota Quota `json:"quota"` } -// Timestamp represents represents date and time information for the +// Timestamp represents date and time information for the // OneDrive API, by using ISO 8601 and is always in UTC time. 
type Timestamp time.Time diff --git a/backend/pcloud/api/types.go b/backend/pcloud/api/types.go index 5ae2b6b6b..c1b5dc217 100644 --- a/backend/pcloud/api/types.go +++ b/backend/pcloud/api/types.go @@ -13,7 +13,7 @@ const ( timeFormat = `"` + time.RFC1123Z + `"` ) -// Time represents represents date and time information for the +// Time represents date and time information for the // pcloud API, by using RFC1123Z type Time time.Time diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 1f86b8592..7b3a81a29 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -2009,7 +2009,7 @@ See [the time option docs](/docs/#time-option) for valid formats. Help: `If set this will decompress gzip encoded objects. It is possible to upload objects to S3 with "Content-Encoding: gzip" -set. Normally rclone will download these files files as compressed objects. +set. Normally rclone will download these files as compressed objects. If this flag is set then rclone will decompress these files with "Content-Encoding: gzip" as they are received. 
This means that rclone @@ -5199,7 +5199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op var head s3.HeadObjectOutput //structs.SetFrom(&head, &req) setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req) - head.ETag = &md5sumHex // doesn't matter quotes are misssing + head.ETag = &md5sumHex // doesn't matter quotes are missing head.ContentLength = &size // If we have done a single part PUT request then we can read these if gotEtag != "" { diff --git a/backend/s3/s3_internal_test.go b/backend/s3/s3_internal_test.go index be12cc42d..9d22ca69b 100644 --- a/backend/s3/s3_internal_test.go +++ b/backend/s3/s3_internal_test.go @@ -78,7 +78,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) { } t.Run("GzipEncoding", func(t *testing.T) { - // Test that the gziped file we uploaded can be + // Test that the gzipped file we uploaded can be // downloaded with and without decompression checkDownload := func(wantContents string, wantSize int64, wantHash string) { gotContents := fstests.ReadObject(ctx, t, o, -1) @@ -116,7 +116,7 @@ func (f *Fs) InternalTestNoHead(t *testing.T) { defer func() { assert.NoError(t, obj.Remove(ctx)) }() - // PutTestcontests checks the received object + // PutTestcontents checks the received object } diff --git a/backend/swift/swift_internal_test.go b/backend/swift/swift_internal_test.go index c85355084..49963fc61 100644 --- a/backend/swift/swift_internal_test.go +++ b/backend/swift/swift_internal_test.go @@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) { want string }{ {"", ""}, - {"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"}, - {"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"}, + {"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, + {"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}, {"0123456789", "0123456789"}, {"abc/ABC/123", "abc/ABC/123"}, {" ", "%20%20%20"}, diff --git a/backend/uptobox/api/types.go b/backend/uptobox/api/types.go index 
4e842c8ab..8cf197754 100644 --- a/backend/uptobox/api/types.go +++ b/backend/uptobox/api/types.go @@ -80,7 +80,7 @@ type UploadInfo struct { } `json:"data"` } -// UploadResponse is the respnse to a successful upload +// UploadResponse is the response to a successful upload type UploadResponse struct { Files []struct { Name string `json:"name"` diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go index fb3bbd7db..d403341bf 100644 --- a/backend/uptobox/uptobox.go +++ b/backend/uptobox/uptobox.go @@ -163,7 +163,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) { return "//" + fullPath[:i], fullPath[i+1:] } -// splitPath is modified splitPath version that doesn't include the seperator +// splitPath is modified splitPath version that doesn't include the separator // in the base path func (f *Fs) splitPath(pth string) (string, string) { // chop of any leading or trailing '/' @@ -479,7 +479,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size } else if size == 0 { return nil, fs.ErrorCantUploadEmptyFiles } - // yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :( + // yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :( // create upload request opts := rest.Opts{ @@ -757,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string if err != nil { return fmt.Errorf("dirmove: source not found: %w", err) } - // check if the destination allready exists + // check if the destination already exists dstPath := f.dirPath(dstRemote) _, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1}) if err == nil { @@ -782,7 +782,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string needMove := srcBase != dstBase // if we have to rename we'll have to use a temporary name since - // there could allready be a directory with the same name as the src 
directory + // there could already be a directory with the same name as the src directory if needRename { // rename to a temporary name tmpName := "rcloneTemp" + random.String(8) diff --git a/backend/zoho/api/types.go b/backend/zoho/api/types.go index 755c57b9b..cb8b7bf44 100644 --- a/backend/zoho/api/types.go +++ b/backend/zoho/api/types.go @@ -6,7 +6,7 @@ import ( "time" ) -// Time represents represents date and time information for Zoho +// Time represents date and time information for Zoho // Zoho uses milliseconds since unix epoch (Java currentTimeMillis) type Time time.Time diff --git a/backend/zoho/zoho.go b/backend/zoho/zoho.go index 2af35daac..1e7556b03 100644 --- a/backend/zoho/zoho.go +++ b/backend/zoho/zoho.go @@ -150,8 +150,8 @@ func init() { return workspace.ID, workspace.Attributes.Name }) case "workspace_end": - worksspaceID := config.Result - m.Set(configRootID, worksspaceID) + workspaceID := config.Result + m.Set(configRootID, workspaceID) return nil, nil } return nil, fmt.Errorf("unknown state %q", config.State) @@ -1264,7 +1264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op return err } - // upload was successfull, need to delete old object before rename + // upload was successful, need to delete old object before rename if err = o.Remove(ctx); err != nil { return fmt.Errorf("failed to remove old object: %w", err) } diff --git a/cmd/bisync/deltas.go b/cmd/bisync/deltas.go index ee66980c4..d1dee1a1d 100644 --- a/cmd/bisync/deltas.go +++ b/cmd/bisync/deltas.go @@ -290,7 +290,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change return } -// exccessDeletes checks whether number of deletes is within allowed range +// excessDeletes checks whether number of deletes is within allowed range func (ds *deltaSet) excessDeletes() bool { maxDelete := ds.opt.MaxDelete maxRatio := float64(maxDelete) / 100.0 diff --git a/cmd/bisync/help.go b/cmd/bisync/help.go index 74f588489..14e6f7866 
100644 --- a/cmd/bisync/help.go +++ b/cmd/bisync/help.go @@ -15,7 +15,7 @@ func makeHelp(help string) string { return replacer.Replace(help) } -var shortHelp = `Perform bidirectonal synchronization between two paths.` +var shortHelp = `Perform bidirectional synchronization between two paths.` var rcHelp = makeHelp(`This takes the following parameters diff --git a/cmd/cmount/mountpoint_windows.go b/cmd/cmount/mountpoint_windows.go index 5cd0aa1d0..a1e511409 100644 --- a/cmd/cmount/mountpoint_windows.go +++ b/cmd/cmount/mountpoint_windows.go @@ -80,7 +80,7 @@ func handleDefaultMountpath() (string, error) { func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (string, error) { // Assuming mount path is a valid network share path (UNC format, "\\Server\Share"). // Always mount as network drive, regardless of the NetworkMode option. - // Find an unused drive letter to use as mountpoint, the the supplied path can + // Find an unused drive letter to use as mountpoint, the supplied path can // be used as volume prefix (network share path) instead of mountpoint. if !opt.NetworkMode { fs.Debugf(nil, "Forcing --network-mode because mountpoint path is network share UNC format") diff --git a/cmd/config/config.go b/cmd/config/config.go index 318685c7d..66b2a1c6d 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -140,7 +140,7 @@ are 100% certain you are already passing obscured passwords then use |rclone config password| command. The flag |--non-interactive| is for use by applications that wish to -configure rclone themeselves, rather than using rclone's text based +configure rclone themselves, rather than using rclone's text based configuration questions. If this flag is set, and rclone needs to ask the user a question, a JSON blob will be returned with the question in it. 
diff --git a/cmd/hashsum/hashsum.go b/cmd/hashsum/hashsum.go index 5655fdcf2..4a1993716 100644 --- a/cmd/hashsum/hashsum.go +++ b/cmd/hashsum/hashsum.go @@ -99,7 +99,7 @@ For the MD5 and SHA1 algorithms there are also dedicated commands, This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path -when there is data to read (if not, the hypen will be treated literaly, +when there is data to read (if not, the hyphen will be treated literally, as a relative path). Run without a hash to see the list of all supported hashes, e.g. diff --git a/cmd/help.go b/cmd/help.go index 0d1295d34..ecc34b63e 100644 --- a/cmd/help.go +++ b/cmd/help.go @@ -343,7 +343,7 @@ func showBackend(name string) { defaultValue := opt.GetValue() // Default value and Required are related: Required means option must // have a value, but if there is a default then a value does not have - // to be explicitely set and then Required makes no difference. + // to be explicitly set and then Required makes no difference. if defaultValue != "" { fmt.Printf("- Default: %s\n", quoteString(defaultValue)) } else { diff --git a/cmd/ls/lshelp/lshelp.go b/cmd/ls/lshelp/lshelp.go index 897c22f1c..3a98fff14 100644 --- a/cmd/ls/lshelp/lshelp.go +++ b/cmd/ls/lshelp/lshelp.go @@ -26,7 +26,7 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse. -Listing a non-existent directory will produce an error except for +Listing a nonexistent directory will produce an error except for remotes which can't have empty directories (e.g. s3, swift, or gcs - the bucket-based remotes). 
`, "|", "`") diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go index eb30f8578..a17ade327 100644 --- a/cmd/lsjson/lsjson.go +++ b/cmd/lsjson/lsjson.go @@ -84,7 +84,7 @@ If ` + "`--files-only`" + ` is not specified directories in addition to the file will be returned. If ` + "`--metadata`" + ` is set then an additional Metadata key will be returned. -This will have metdata in rclone standard format as a JSON object. +This will have metadata in rclone standard format as a JSON object. if ` + "`--stat`" + ` is set then a single JSON blob will be returned about the item pointed to. This will return an error if the item isn't found. diff --git a/cmd/md5sum/md5sum.go b/cmd/md5sum/md5sum.go index 09da8526f..91bc66e44 100644 --- a/cmd/md5sum/md5sum.go +++ b/cmd/md5sum/md5sum.go @@ -35,7 +35,7 @@ to running ` + "`rclone hashsum MD5 remote:path`" + `. This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path -when there is data to read (if not, the hypen will be treated literaly, +when there is data to read (if not, the hyphen will be treated literally, as a relative path). `, RunE: func(command *cobra.Command, args []string) error { diff --git a/cmd/mountlib/help.go b/cmd/mountlib/help.go index 10aea7b9f..7516157e8 100644 --- a/cmd/mountlib/help.go +++ b/cmd/mountlib/help.go @@ -88,7 +88,7 @@ and experience unexpected program errors, freezes or other issues, consider moun as a network drive instead. When mounting as a fixed disk drive you can either mount to an unused drive letter, -or to a path representing a **non-existent** subdirectory of an **existing** parent +or to a path representing a **nonexistent** subdirectory of an **existing** parent directory or drive. Using the special value |*| will tell rclone to automatically assign the next available drive letter, starting with Z: and moving backward. 
Examples: @@ -119,7 +119,7 @@ the mapped drive, shown in Windows Explorer etc, while the complete |\\server\share| will be reported as the remote UNC path by |net use| etc, just like a normal network drive mapping. -If you specify a full network share UNC path with |--volname|, this will implicitely +If you specify a full network share UNC path with |--volname|, this will implicitly set the |--network-mode| option, so the following two examples have same result: rclone @ remote:path/to/files X: --network-mode @@ -128,7 +128,7 @@ set the |--network-mode| option, so the following two examples have same result: You may also specify the network share UNC path as the mountpoint itself. Then rclone will automatically assign a drive letter, same as with |*| and use that as mountpoint, and instead use the UNC path specified as the volume name, as if it were -specified with the |--volname| option. This will also implicitely set +specified with the |--volname| option. This will also implicitly set the |--network-mode| option. This means the following two examples have same result: rclone @ remote:path/to/files \\cloud\remote @@ -164,7 +164,7 @@ The permissions on each entry will be set according to [options](#options) The default permissions corresponds to |--file-perms 0666 --dir-perms 0777|, i.e. read and write permissions to everyone. This means you will not be able -to start any programs from the the mount. To be able to do that you must add +to start any programs from the mount. To be able to do that you must add execute permissions, e.g. |--file-perms 0777 --dir-perms 0777| to add it to everyone. If the program needs to write files, chances are you will have to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)). 
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go index ba3773e5c..907f16efd 100644 --- a/cmd/mountlib/mount.go +++ b/cmd/mountlib/mount.go @@ -238,7 +238,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) { return nil, err } - if err = m.CheckAllowings(); err != nil { + if err = m.CheckAllowed(); err != nil { return nil, err } m.SetVolumeName(m.MountOpt.VolumeName) diff --git a/cmd/mountlib/utils.go b/cmd/mountlib/utils.go index 1ec4d786a..099e3a767 100644 --- a/cmd/mountlib/utils.go +++ b/cmd/mountlib/utils.go @@ -62,9 +62,9 @@ func absPath(path string) string { return path } -// CheckAllowings informs about ignored flags on Windows. If not on Windows +// CheckAllowed informs about ignored flags on Windows. If not on Windows // and not --allow-non-empty flag is used, verify that mountpoint is empty. -func (m *MountPoint) CheckAllowings() error { +func (m *MountPoint) CheckAllowed() error { opt := &m.MountOpt if runtime.GOOS == "windows" { if opt.AllowNonEmpty { diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index 02427fb49..d56f69bfc 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -48,7 +48,7 @@ press '?' to toggle the help on and off. The supported keys are: ` + strings.Join(helpText()[1:], "\n ") + ` Listed files/directories may be prefixed by a one-character flag, -some of them combined with a description in brackes at end of line. +some of them combined with a description in brackets at end of line. These flags have the following meaning: e means this is an empty directory, i.e. 
contains no files (but diff --git a/cmd/serve/docker/unix.go b/cmd/serve/docker/unix.go index 3c533d454..8bf7b6fbc 100644 --- a/cmd/serve/docker/unix.go +++ b/cmd/serve/docker/unix.go @@ -25,7 +25,7 @@ func newUnixListener(path string, gid int) (net.Listener, string, error) { return nil, "", fmt.Errorf("expected only one socket from systemd, got %d", len(fds)) } - // create socket outselves + // create socket ourselves if filepath.Ext(path) == "" { path += ".sock" } diff --git a/cmd/serve/ftp/ftp.go b/cmd/serve/ftp/ftp.go index 08cca77f3..2023ba851 100644 --- a/cmd/serve/ftp/ftp.go +++ b/cmd/serve/ftp/ftp.go @@ -153,7 +153,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*server, error) { } s.useTLS = s.opt.TLSKey != "" - // Check PassivePorts format since the the server library doesn't! + // Check PassivePorts format since the server library doesn't! if !passivePortsRe.MatchString(opt.PassivePorts) { return nil, fmt.Errorf("invalid format for passive ports %q", opt.PassivePorts) } diff --git a/cmd/sha1sum/sha1sum.go b/cmd/sha1sum/sha1sum.go index 25badd19c..053ca0cbb 100644 --- a/cmd/sha1sum/sha1sum.go +++ b/cmd/sha1sum/sha1sum.go @@ -35,7 +35,7 @@ to running ` + "`rclone hashsum SHA1 remote:path`" + `. This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path -when there is data to read (if not, the hypen will be treated literaly, +when there is data to read (if not, the hyphen will be treated literally, as a relative path). 
This command can also hash data received on STDIN, if not passing diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go index 9f3d69b14..b0bea31b2 100644 --- a/cmd/touch/touch.go +++ b/cmd/touch/touch.go @@ -144,12 +144,12 @@ func Touch(ctx context.Context, f fs.Fs, remote string) error { return nil } if notCreateNewFile { - fs.Logf(f, "Not touching non-existent file due to --no-create") + fs.Logf(f, "Not touching nonexistent file due to --no-create") return nil } if recursive { // For consistency, --recursive never creates new files. - fs.Logf(f, "Not touching non-existent file due to --recursive") + fs.Logf(f, "Not touching nonexistent file due to --recursive") return nil } if operations.SkipDestructive(ctx, f, "touch (create)") { diff --git a/cmdtest/environment_test.go b/cmdtest/environment_test.go index 2124dc31c..b5b27eab5 100644 --- a/cmdtest/environment_test.go +++ b/cmdtest/environment_test.go @@ -81,7 +81,7 @@ func TestEnvironmentVariables(t *testing.T) { // Backend flags and remote name // - The listremotes command includes names from environment variables, // the part between "RCLONE_CONFIG_" and "_TYPE", converted to lowercase. - // - When using using a remote created from env, e.g. with lsd command, + // - When using a remote created from env, e.g. with lsd command, // the name is case insensitive in contrast to remotes in config file // (fs.ConfigToEnv converts to uppercase before checking environment). // - Previously using a remote created from env, e.g. with lsd command, diff --git a/docs/content/bisync.md b/docs/content/bisync.md index 68393d2a8..78bf06a0b 100644 --- a/docs/content/bisync.md +++ b/docs/content/bisync.md @@ -323,7 +323,7 @@ Most of these events come up due to a error status from an internal call. On such a critical error the `{...}.path1.lst` and `{...}.path2.lst` listing files are renamed to extension `.lst-err`, which blocks any future bisync runs (since the normal `.lst` files are not found). 
-Bisync keeps them under `bisync` subdirectory of the rclone cache direcory, +Bisync keeps them under `bisync` subdirectory of the rclone cache directory, typically at `${HOME}/.cache/rclone/bisync/` on Linux. Some errors are considered temporary and re-running the bisync is not blocked. @@ -421,7 +421,7 @@ don't have spelling case differences (`Smile.jpg` vs. `smile.jpg`). ## Windows support {#windows} Bisync has been tested on Windows 8.1, Windows 10 Pro 64-bit and on Windows -Github runners. +GitHub runners. Drive letters are allowed, including drive letters mapped to network drives (`rclone bisync J:\localsync GDrive:`). @@ -929,7 +929,7 @@ test command flags can be equally prefixed by a single `-` or double dash. synched tree even if there are check file mismatches in the test tree. - Some Dropbox tests can fail, notably printing the following message: `src and dst identical but can't set mod time without deleting and re-uploading` - This is expected and happens due a way Dropbox handles modificaion times. + This is expected and happens due a way Dropbox handles modification times. You should use the `-refresh-times` test flag to make up for this. - If Dropbox tests hit request limit for you and print error message `too_many_requests/...: Too many requests or write operations.` @@ -939,7 +939,7 @@ test command flags can be equally prefixed by a single `-` or double dash. ### Updating golden results Sometimes even a slight change in the bisync source can cause little changes -spread around many log files. Updating them manually would be a nighmare. +spread around many log files. Updating them manually would be a nightmare. The `-golden` flag will store the `test.log` and `*.lst` listings from each test case into respective golden directories. 
Golden results will diff --git a/docs/content/changelog.md b/docs/content/changelog.md index e4745b24d..0a6f32119 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -14,7 +14,7 @@ description: "Rclone Changelog" * build: Fix android build after GitHub actions change (Nick Craig-Wood) * dlna: Fix SOAP action header parsing (Joram Schrijver) * docs: Fix links to mount command from install docs (albertony) - * dropox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood) + * dropbox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood) * fs: Fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS" (Nick Craig-Wood) * serve sftp: Fix checksum detection (Nick Craig-Wood) * sync: Add accidentally missed filter-sensitivity to --backup-dir option (Nick Naumann) @@ -274,7 +274,7 @@ description: "Rclone Changelog" * build * Fix ARM architecture version in .deb packages after nfpm change (Nick Craig-Wood) * Hard fork `github.com/jlaffaye/ftp` to fix `go get github.com/rclone/rclone` (Nick Craig-Wood) - * oauthutil: Fix crash when webrowser requests `/robots.txt` (Nick Craig-Wood) + * oauthutil: Fix crash when webbrowser requests `/robots.txt` (Nick Craig-Wood) * operations: Fix goroutine leak in case of copy retry (Ankur Gupta) * rc: * Fix `operations/publiclink` default for `expires` parameter (Nick Craig-Wood) @@ -360,7 +360,7 @@ description: "Rclone Changelog" * Add rclone to list of supported `md5sum`/`sha1sum` commands to look for (albertony) * Refactor so we only have one way of running remote commands (Nick Craig-Wood) * Fix timeout on hashing large files by sending keepalives (Nick Craig-Wood) - * Fix unecessary seeking when uploading and downloading files (Nick Craig-Wood) + * Fix unnecessary seeking when uploading and downloading files (Nick Craig-Wood) * Update docs on how to create `known_hosts` file (Nick Craig-Wood) * Storj * Rename tardigrade backend to storj backend (Nick Craig-Wood) @@ -961,8 +961,8 @@ 
description: "Rclone Changelog" * Add sort by average size in directory (Adam Plánský) * Add toggle option for average s3ize in directory - key 'a' (Adam Plánský) * Add empty folder flag into ncdu browser (Adam Plánský) - * Add `!` (errror) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood) - * obscure: Make `rclone osbcure -` ignore newline at end of line (Nick Craig-Wood) + * Add `!` (error) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood) + * obscure: Make `rclone obscure -` ignore newline at end of line (Nick Craig-Wood) * operations * Add logs when need to upload files to set mod times (Nick Craig-Wood) * Move and copy log name of the destination object in verbose (Adam Plánský) @@ -987,7 +987,7 @@ description: "Rclone Changelog" * Make the error count match up in the log message (Nick Craig-Wood) * move: Fix data loss when source and destination are the same object (Nick Craig-Wood) * operations - * Fix `--cutof-mode` hard not cutting off immediately (Nick Craig-Wood) + * Fix `--cutoff-mode` hard not cutting off immediately (Nick Craig-Wood) * Fix `--immutable` error message (Nick Craig-Wood) * sync * Fix `--cutoff-mode` soft & cautious so it doesn't end the transfer early (Nick Craig-Wood) @@ -1035,7 +1035,7 @@ description: "Rclone Changelog" * Fixed crash on an empty file name (lluuaapp) * Box * Fix NewObject for files that differ in case (Nick Craig-Wood) - * Fix finding directories in a case insentive way (Nick Craig-Wood) + * Fix finding directories in a case insensitive way (Nick Craig-Wood) * Chunker * Skip long local hashing, hash in-transit (fixes) (Ivan Andreev) * Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood) @@ -1116,7 +1116,7 @@ description: "Rclone Changelog" * Implement `--sftp-use-fstat` for unusual SFTP servers (Nick Craig-Wood) * Sugarsync * Fix NewObject for files that differ in case (Nick Craig-Wood) - * Fix finding directories in a case insentive way 
(Nick Craig-Wood) + * Fix finding directories in a case insensitive way (Nick Craig-Wood) * Swift * Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân) * Ensure partially uploaded large files are uploaded unless `--swift-leave-parts-on-error` (Nguyễn Hữu Luân) @@ -1190,7 +1190,7 @@ description: "Rclone Changelog" [See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2) * Bug Fixes - * acounting + * accounting * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood) * Stabilize display order of transfers on Windows (Nick Craig-Wood) * operations @@ -2160,7 +2160,7 @@ all the docs and Edward Barker for helping re-write the front page. * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood) * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood) * Mount - * Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood) + * Default `--daemon-timeout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood) * Update docs to show mounting from root OK for bucket-based (Nick Craig-Wood) * Remove nonseekable flag from write files (Nick Craig-Wood) * VFS @@ -2468,7 +2468,7 @@ all the docs and Edward Barker for helping re-write the front page. * Update google cloud storage endpoints (weetmuts) * HTTP * Add an example with username and password which is supported but wasn't documented (Nick Craig-Wood) - * Fix backend with `--files-from` and non-existent files (Nick Craig-Wood) + * Fix backend with `--files-from` and nonexistent files (Nick Craig-Wood) * Hubic * Make error message more informative if authentication fails (Nick Craig-Wood) * Jottacloud @@ -2952,7 +2952,7 @@ Point release to fix hubic and azureblob backends. 
* FTP * Work around strange response from box FTP server * More workarounds for FTP servers to fix mkParentDir error - * Fix no error on listing non-existent directory + * Fix no error on listing nonexistent directory * Google Cloud Storage * Add service_account_credentials (Matt Holt) * Detect bucket presence by listing it - minimises permissions needed @@ -3025,7 +3025,7 @@ Point release to fix hubic and azureblob backends. * Add .deb and .rpm packages as part of the build * Make a beta release for all branches on the main repo (but not pull requests) * Bug Fixes - * config: fixes errors on non existing config by loading config file only on first access + * config: fixes errors on nonexistent config by loading config file only on first access * config: retry saving the config after failure (Mateusz) * sync: when using `--backup-dir` don't delete files if we can't set their modtime * this fixes odd behaviour with Dropbox and `--backup-dir` @@ -3560,7 +3560,7 @@ Point release to fix hubic and azureblob backends. * Update B2 docs with Data usage, and Crypt section - thanks Tomasz Mazur * S3 * Command line and config file support for - * Setting/overriding ACL - thanks Radek Senfeld + * Setting/overriding ACL - thanks Radek Šenfeld * Setting storage class - thanks Asko Tamm * Drive * Make exponential backoff work exactly as per Google specification diff --git a/docs/content/compress.md b/docs/content/compress.md index 9a2d6b974..f9f3afbc0 100644 --- a/docs/content/compress.md +++ b/docs/content/compress.md @@ -129,7 +129,7 @@ Generally -1 (default, equivalent to 5) is recommended. Levels 1 to 9 increase compression at the cost of speed. Going past 6 generally offers very little return. -Level -2 uses Huffmann encoding only. Only use if you know what you +Level -2 uses Huffman encoding only. Only use if you know what you are doing. Level 0 turns off compression. 
diff --git a/docs/content/crypt.md b/docs/content/crypt.md index 92390bda3..b0110f635 100644 --- a/docs/content/crypt.md +++ b/docs/content/crypt.md @@ -241,7 +241,7 @@ the password configured for an existing crypt remote means you will no longer able to decrypt any of the previously encrypted content. The only possibility is to re-upload everything via a crypt remote configured with your new password. -Depending on the size of your data, your bandwith, storage quota etc, there are +Depending on the size of your data, your bandwidth, storage quota etc, there are different approaches you can take: - If you have everything in a different location, for example on your local system, you could remove all of the prior encrypted files, change the password for your @@ -254,7 +254,7 @@ effectively decrypting everything on the fly using the old password and re-encrypting using the new password. When done, delete the original crypt remote directory and finally the rclone crypt configuration with the old password. All data will be streamed from the storage system and back, so you will -get half the bandwith and be charged twice if you have upload and download quota +get half the bandwidth and be charged twice if you have upload and download quota on the storage system. **Note**: A security problem related to the random password generator @@ -567,7 +567,7 @@ How to encode the encrypted filename to text string. This option could help with shortening the encrypted filename. The suitable option would depend on the way your remote count the filename -length and if it's case sensitve. +length and if it's case sensitive. Properties: diff --git a/docs/content/docs.md b/docs/content/docs.md index c031a94be..95609d757 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -498,7 +498,7 @@ backends can also store arbitrary user metadata. 
Where possible the key names are standardized, so, for example, it is possible to copy object metadata from s3 to azureblob for example and -metadata will be translated apropriately. +metadata will be translated appropriately. Some backends have limits on the size of the metadata and rclone will give errors on upload if they are exceeded. @@ -641,7 +641,7 @@ would mean limit the upload and download bandwidth to 10 MiB/s. single limit, specify the desired bandwidth in KiB/s, or use a suffix B|K|M|G|T|P. The default is `0` which means to not limit bandwidth. -The upload and download bandwidth can be specified seperately, as +The upload and download bandwidth can be specified separately, as `--bwlimit UP:DOWN`, so --bwlimit 10M:100k @@ -2011,7 +2011,7 @@ In all other cases the file will not be updated. Consider using the `--modify-window` flag to compensate for time skews between the source and the backend, for backends that do not support mod times, and instead use uploaded times. However, if the backend -does not support checksums, note that sync'ing or copying within the +does not support checksums, note that syncing or copying within the time skew window may still result in additional transfers for safety. ### --use-mmap ### diff --git a/docs/content/drive.md b/docs/content/drive.md index 7bb23ed59..0277eba31 100644 --- a/docs/content/drive.md +++ b/docs/content/drive.md @@ -1335,7 +1335,7 @@ drives found and a combined drive. upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:" Adding this to the rclone config file will cause those team drives to -be accessible with the aliases shown. Any illegal charactes will be +be accessible with the aliases shown. Any illegal characters will be substituted with "_" and duplicate names will have numbers suffixed. It will also add a remote called AllDrives which shows all the shared drives combined into one directory tree. 
diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md index 62b351c51..a8f4fe83f 100644 --- a/docs/content/dropbox.md +++ b/docs/content/dropbox.md @@ -409,7 +409,7 @@ Properties: #### --dropbox-batch-commit-timeout -Max time to wait for a batch to finish comitting +Max time to wait for a batch to finish committing Properties: diff --git a/docs/content/filefabric.md b/docs/content/filefabric.md index 225573ba5..d66b48162 100644 --- a/docs/content/filefabric.md +++ b/docs/content/filefabric.md @@ -13,7 +13,7 @@ through a global file system. ## Configuration The initial setup for the Enterprise File Fabric backend involves -getting a token from the the Enterprise File Fabric which you need to +getting a token from the Enterprise File Fabric which you need to do in your browser. `rclone config` walks you through it. Here is an example of how to make a remote called `remote`. First run: diff --git a/docs/content/flags.md b/docs/content/flags.md index cd9801851..2426a4a4a 100644 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -313,7 +313,7 @@ and may be set in the config file. 
--drive-use-trash Send files to the trash instead of deleting permanently (default true) --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) --dropbox-auth-url string Auth server URL - --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish comitting (default 10m0s) + --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") --dropbox-batch-size int Max number of files in upload batch --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s) diff --git a/docs/content/ftp.md b/docs/content/ftp.md index 7e95a0a94..fa29a6821 100644 --- a/docs/content/ftp.md +++ b/docs/content/ftp.md @@ -138,7 +138,7 @@ can be set with [`--ftp-port`](#ftp-port). In addition to the [default restricted characters set](/overview/#restricted-characters) the following characters are also replaced: -File names cannot end with the following characters. Repacement is +File names cannot end with the following characters. Replacement is limited to the last character in a file name: | Character | Value | Replacement | diff --git a/docs/content/googlecloudstorage.md b/docs/content/googlecloudstorage.md index 61b751d15..e23090585 100644 --- a/docs/content/googlecloudstorage.md +++ b/docs/content/googlecloudstorage.md @@ -607,7 +607,7 @@ Properties: If set this will decompress gzip encoded objects. It is possible to upload objects to GCS with "Content-Encoding: gzip" -set. Normally rclone will download these files files as compressed objects. +set. Normally rclone will download these files as compressed objects. If this flag is set then rclone will decompress these files with "Content-Encoding: gzip" as they are received. 
This means that rclone diff --git a/docs/content/hidrive.md b/docs/content/hidrive.md index 68375d1a2..cd763cec3 100644 --- a/docs/content/hidrive.md +++ b/docs/content/hidrive.md @@ -124,7 +124,7 @@ the process is very similar to the process of initial setup exemplified before. HiDrive allows modification times to be set on objects accurate to 1 second. HiDrive supports [its own hash type](https://static.hidrive.com/dev/0001) -which is used to verify the integrety of file contents after successful transfers. +which is used to verify the integrity of file contents after successful transfers. ### Restricted filename characters diff --git a/docs/content/install.md b/docs/content/install.md index 714794f3d..f69b04fe7 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -360,7 +360,7 @@ the system. Both scheduled task and Windows service can be used to achieve this. NOTE: Remember that when rclone runs as the `SYSTEM` user, the user profile that it sees will not be yours. This means that if you normally run rclone with configuration file in the default location, to be able to use the same configuration -when running as the system user you must explicitely tell rclone where to find +when running as the system user you must explicitly tell rclone where to find it with the [`--config`](https://rclone.org/docs/#config-config-file) option, or else it will look in the system users profile path (`C:\Windows\System32\config\systemprofile`). To test your command manually from a Command Prompt, you can run it with @@ -424,7 +424,7 @@ it should be possible through path rewriting as described [here](https://github. To Windows service running any rclone command, the excellent third-party utility [NSSM](http://nssm.cc), the "Non-Sucking Service Manager", can be used. 
-It includes some advanced features such as adjusting process periority, defining +It includes some advanced features such as adjusting process priority, defining process environment variables, redirect to file anything written to stdout, and customized response to different exit codes, with a GUI to configure everything from (although it can also be used from command line ). diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index 87b1e9e28..4a61d2e78 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -18,7 +18,7 @@ it also provides white-label solutions to different companies, such as: * Elgiganten Sweden (cloud.elgiganten.se) * Elgiganten Denmark (cloud.elgiganten.dk) * Giganti Cloud (cloud.gigantti.fi) - * ELKO Clouud (cloud.elko.is) + * ELKO Cloud (cloud.elko.is) Most of the white-label versions are supported by this backend, although may require different authentication setup - described below. diff --git a/docs/content/mega.md b/docs/content/mega.md index cd26c7010..7cc95adb6 100644 --- a/docs/content/mega.md +++ b/docs/content/mega.md @@ -110,7 +110,7 @@ Use `rclone dedupe` to fix duplicated files. #### Object not found If you are connecting to your Mega remote for the first time, -to test access and syncronisation, you may receive an error such as +to test access and synchronization, you may receive an error such as ``` Failed to create file system for "my-mega-remote:": diff --git a/docs/content/netstorage.md b/docs/content/netstorage.md index 0c1bef5bd..eab479c1c 100644 --- a/docs/content/netstorage.md +++ b/docs/content/netstorage.md @@ -152,7 +152,7 @@ Individual symlink files on the remote can be used with the commands like "cat" With NetStorage, directories can exist in one of two forms: 1. **Explicit Directory**. This is an actual, physical directory that you have created in a storage group. -2. **Implicit Directory**. This refers to a directory within a path that has not been physically created. 
For example, during upload of a file, non-existent subdirectories can be specified in the target path. NetStorage creates these as "implicit." While the directories aren't physically created, they exist implicitly and the noted path is connected with the uploaded file. +2. **Implicit Directory**. This refers to a directory within a path that has not been physically created. For example, during upload of a file, nonexistent subdirectories can be specified in the target path. NetStorage creates these as "implicit." While the directories aren't physically created, they exist implicitly and the noted path is connected with the uploaded file. Rclone will intercept all file uploads and mkdir commands for the NetStorage remote and will explicitly issue the mkdir command for each directory in the uploading path. This will help with the interoperability with the other Akamai services such as SFTP and the Content Management Shell (CMShell). Rclone will not guarantee correctness of operations with implicit directories which might have been created as a result of using an upload API directly. diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index b7a40ba78..0f6897e34 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -568,7 +568,7 @@ An official document about the limitations for different types of OneDrive can b ## Versions Every change in a file OneDrive causes the service to create a new -version of the the file. This counts against a users quota. For +version of the file. This counts against a users quota. For example changing the modification time of a file creates a second version, so the file apparently uses twice the space. diff --git a/docs/content/overview.md b/docs/content/overview.md index 2a9301acf..e20543238 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -111,7 +111,7 @@ systems they must support a common hash type. 
### ModTime ### -Allmost all cloud storage systems store some sort of timestamp +Almost all cloud storage systems store some sort of timestamp on objects, but several of them not something that is appropriate to use for syncing. E.g. some backends will only write a timestamp that represent the time of the upload. To be relevant for syncing diff --git a/docs/content/rc.md b/docs/content/rc.md index fc6316e51..9d87e2e65 100644 --- a/docs/content/rc.md +++ b/docs/content/rc.md @@ -397,7 +397,7 @@ The parameters can be a string as per the rest of rclone, eg `s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as JSON blobs. -If specifyng a JSON blob it should be a object mapping strings to +If specifying a JSON blob it should be an object mapping strings to strings. These values will be used to configure the remote. There are 3 special values which may be set: @@ -1568,7 +1568,7 @@ check that parameter passing is working properly. **Authentication is required for this call.** -### sync/bisync: Perform bidirectonal synchronization between two paths. {#sync-bisync} +### sync/bisync: Perform bidirectional synchronization between two paths. {#sync-bisync} This takes the following parameters diff --git a/docs/content/s3.md b/docs/content/s3.md index dc5fd6d47..d09dd3592 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -332,7 +332,7 @@ upload. Rclone's default directory traversal is to process each directory individually. This takes one API call per directory. Using the -`--fast-list` flag will read all info about the the objects into +`--fast-list` flag will read all info about the objects into memory first using a smaller number of API calls (one per 1000 objects). See the [rclone docs](/docs/#fast-list) for more details. diff --git a/docs/content/sftp.md b/docs/content/sftp.md index 8f3066a93..48ebfb3a3 100644 --- a/docs/content/sftp.md +++ b/docs/content/sftp.md @@ -21,7 +21,7 @@ SSH installations. Paths are specified as `remote:path`.
If the path does not begin with a `/` it is relative to the home directory of the user. An empty path `remote:` refers to the user's home directory. For example, `rclone lsd remote:` -would list the home directory of the user cofigured in the rclone remote config +would list the home directory of the user configured in the rclone remote config (`i.e /home/sftpuser`). However, `rclone lsd remote:/` would list the root directory for remote machine (i.e. `/`) @@ -264,7 +264,7 @@ can also run a SSH server, which is a port of OpenSSH (see official [installation guide](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)). On a Windows server the shell handling is different: Although it can also be set up to use a Unix type shell, e.g. Cygwin bash, the default is to use Windows Command Prompt (cmd.exe), and PowerShell is a recommended -alternative. All of these have bahave differently, which rclone must handle. +alternative. All of these behave differently, which rclone must handle. Rclone tries to auto-detect what type of shell is used on the server, first time you access the SFTP remote. If a remote shell session is @@ -296,7 +296,7 @@ a new sftp remote is accessed. If you configure a sftp remote without a config file, e.g. an [on the fly](/docs/#backend-path-to-dir]) remote, rclone will have nowhere to store the result, and it will re-run the command on every access. To avoid this you should -explicitely set the `shell_type` option to the correct value, +explicitly set the `shell_type` option to the correct value, or to `none` if you want to prevent rclone from executing any remote shell commands. @@ -304,7 +304,7 @@ It is also important to note that, since the shell type decides how quoting and escaping of file paths used as command-line arguments are performed, configuring the wrong shell type may leave you exposed to command injection exploits.
Make sure to confirm the auto-detected -shell type, or explicitely set the shell type you know is correct, +shell type, or explicitly set the shell type you know is correct, or disable shell access until you know. ### Checksum diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go index fc17f95ac..3101ef993 100644 --- a/fs/accounting/stats.go +++ b/fs/accounting/stats.go @@ -278,7 +278,7 @@ type transferStats struct { speed float64 } -// calculateTransferStats calculates some addtional transfer stats not +// calculateTransferStats calculates some additional transfer stats not // stored directly in StatsInfo func (s *StatsInfo) calculateTransferStats() (ts transferStats) { // checking and transferring have their own locking so read diff --git a/fs/cache/cache_test.go b/fs/cache/cache_test.go index c7620e25c..c03aa7dfa 100644 --- a/fs/cache/cache_test.go +++ b/fs/cache/cache_test.go @@ -154,7 +154,7 @@ func TestPin(t *testing.T) { cleanup, create := mockNewFs(t) defer cleanup() - // Test pinning and unpinning non-existent + // Test pinning and unpinning nonexistent f := mockfs.NewFs(context.Background(), "mock", "/alien") Pin(f) Unpin(f) diff --git a/fs/config/configfile/configfile.go b/fs/config/configfile/configfile.go index e9c498995..572ac15e1 100644 --- a/fs/config/configfile/configfile.go +++ b/fs/config/configfile/configfile.go @@ -40,7 +40,7 @@ func (s *Storage) check() { if err == nil { // check to see if config file has changed and if it has, reload it if s.fi == nil || !fi.ModTime().Equal(s.fi.ModTime()) || fi.Size() != s.fi.Size() { - fs.Debugf(nil, "Config file has changed externaly - reloading") + fs.Debugf(nil, "Config file has changed externally - reloading") err := s._load() if err != nil { fs.Errorf(nil, "Failed to read config file - using previous config: %v", err) diff --git a/fs/config/configmap/configmap.go b/fs/config/configmap/configmap.go index 516a91992..ee8d4918f 100644 --- a/fs/config/configmap/configmap.go +++ 
b/fs/config/configmap/configmap.go @@ -137,7 +137,7 @@ func (c Simple) Set(key, value string) { } // String the map value the same way the config parser does, but with -// sorted keys for reproducability. +// sorted keys for reproducibility. func (c Simple) String() string { var ks = make([]string, 0, len(c)) for k := range c { diff --git a/fs/dirtree/dirtree.go b/fs/dirtree/dirtree.go index 2785dcbed..6b4be8b6d 100644 --- a/fs/dirtree/dirtree.go +++ b/fs/dirtree/dirtree.go @@ -1,5 +1,5 @@ // Package dirtree contains the DirTree type which is used for -// building filesystem heirachies in memory. +// building filesystem hierarchies in memory. package dirtree import ( diff --git a/fs/newfs.go b/fs/newfs.go index f64cb2c8d..adf8582fa 100644 --- a/fs/newfs.go +++ b/fs/newfs.go @@ -33,7 +33,7 @@ func NewFs(ctx context.Context, path string) (Fs, error) { overridden := fsInfo.Options.Overridden(config) if len(overridden) > 0 { extraConfig := overridden.String() - //Debugf(nil, "detected overriden config %q", extraConfig) + //Debugf(nil, "detected overridden config %q", extraConfig) md5sumBinary := md5.Sum([]byte(extraConfig)) suffix := base64.RawURLEncoding.EncodeToString(md5sumBinary[:]) // 5 characters length is 5*6 = 30 bits of base64 diff --git a/fs/operations/check_test.go b/fs/operations/check_test.go index 21692486d..be22051fe 100644 --- a/fs/operations/check_test.go +++ b/fs/operations/check_test.go @@ -186,11 +186,11 @@ func TestCheck(t *testing.T) { func TestCheckFsError(t *testing.T) { ctx := context.Background() - dstFs, err := fs.NewFs(ctx, "non-existent") + dstFs, err := fs.NewFs(ctx, "nonexistent") if err != nil { t.Fatal(err) } - srcFs, err := fs.NewFs(ctx, "non-existent") + srcFs, err := fs.NewFs(ctx, "nonexistent") if err != nil { t.Fatal(err) } diff --git a/fs/operations/lsjson_test.go b/fs/operations/lsjson_test.go index d5d7ca319..a8943cfbd 100644 --- a/fs/operations/lsjson_test.go +++ b/fs/operations/lsjson_test.go @@ -13,7 +13,7 @@ import ( 
"github.com/stretchr/testify/require" ) -// Compare a and b in a file system idependent way +// Compare a and b in a file system independent way func compareListJSONItem(t *testing.T, a, b *operations.ListJSONItem, precision time.Duration) { assert.Equal(t, a.Path, b.Path, "Path") assert.Equal(t, a.Name, b.Name, "Name") diff --git a/fs/operations/operations.go b/fs/operations/operations.go index 2bfddd07d..59de5aac0 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -1787,7 +1787,7 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, autoFilename _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition")) headerFilename := path.Base(strings.Replace(params["filename"], "\\", "/", -1)) if err != nil || headerFilename == "" { - return fmt.Errorf("CopyURL failed: filename not found in the Content-Dispoition header") + return fmt.Errorf("CopyURL failed: filename not found in the Content-Disposition header") } fs.Debugf(headerFilename, "filename found in Content-Disposition header.") return fn(ctx, headerFilename, resp.Body, resp.ContentLength, modTime) diff --git a/fs/rc/webgui/plugins.go b/fs/rc/webgui/plugins.go index 23f9692c2..859e6d999 100644 --- a/fs/rc/webgui/plugins.go +++ b/fs/rc/webgui/plugins.go @@ -227,14 +227,14 @@ func (p *Plugins) GetPluginByName(name string) (out *PackageJSON, err error) { } -// getAuthorRepoBranchGithub gives author, repoName and branch from a github.com url +// getAuthorRepoBranchGitHub gives author, repoName and branch from a github.com url // // url examples: // https://github.com/rclone/rclone-webui-react/ // http://github.com/rclone/rclone-webui-react // https://github.com/rclone/rclone-webui-react/tree/caman-js // github.com/rclone/rclone-webui-react -func getAuthorRepoBranchGithub(url string) (author string, repoName string, branch string, err error) { +func getAuthorRepoBranchGitHub(url string) (author string, repoName string, branch string, err error) { repoURL := 
url repoURL = strings.Replace(repoURL, "https://", "", 1) repoURL = strings.Replace(repoURL, "http://", "", 1) diff --git a/fs/rc/webgui/rc.go b/fs/rc/webgui/rc.go index 8c54040e3..c3bada21f 100644 --- a/fs/rc/webgui/rc.go +++ b/fs/rc/webgui/rc.go @@ -102,7 +102,7 @@ func rcAddPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) { return nil, err } - author, repoName, repoBranch, err := getAuthorRepoBranchGithub(pluginURL) + author, repoName, repoBranch, err := getAuthorRepoBranchGitHub(pluginURL) if err != nil { return nil, err } diff --git a/fs/registry.go b/fs/registry.go index 33dba915b..adf891ad2 100644 --- a/fs/registry.go +++ b/fs/registry.go @@ -28,7 +28,7 @@ type RegInfo struct { // Prefix for command line flags for this fs - defaults to Name if not set Prefix string // Create a new file system. If root refers to an existing - // object, then it should return an Fs which which points to + // object, then it should return an Fs which points to // the parent of that object and ErrorIsFile. 
NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"` // Function to call to help with config - see docs for ConfigIn for more info @@ -179,7 +179,7 @@ func (o *Option) MarshalJSON() ([]byte, error) { }) } -// GetValue gets the current current value which is the default if not set +// GetValue gets the current value which is the default if not set func (o *Option) GetValue() interface{} { val := o.Value if val == nil { diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index 28e02a44b..3b9767eaf 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -531,14 +531,14 @@ func Run(t *testing.T, opt *Opt) { assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp") }) - // TestFsRmdirNotFound tests deleting a non-existent directory + // TestFsRmdirNotFound tests deleting a nonexistent directory t.Run("FsRmdirNotFound", func(t *testing.T) { skipIfNotOk(t) if isBucketBasedButNotRoot(f) { t.Skip("Skipping test as non root bucket-based remote") } err := f.Rmdir(ctx, "") - assert.Error(t, err, "Expecting error on Rmdir non-existent") + assert.Error(t, err, "Expecting error on Rmdir nonexistent") }) // Make the directory @@ -729,7 +729,7 @@ func Run(t *testing.T, opt *Opt) { o, err := f.NewObject(ctx, "potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) - // Now try an object in a non existing directory + // Now try an object in a nonexistent directory o, err = f.NewObject(ctx, "directory/not/found/potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) @@ -1632,7 +1632,7 @@ func Run(t *testing.T, opt *Opt) { fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision()) }) - // Check that that listing the entries is OK + // Check that listing the entries is OK t.Run("ListEntries", func(t *testing.T) { entries, err := 
rootRemote.List(context.Background(), configLeaf) require.NoError(t, err) @@ -2068,7 +2068,7 @@ func Run(t *testing.T, opt *Opt) { // TestFsRootCollapse tests if the root of an fs "collapses" to the // absolute root. It creates a new fs of the same backend type with its - // root set to a *non-existent* folder, and attempts to read the info of + // root set to a *nonexistent* folder, and attempts to read the info of // an object in that folder, whose name is taken from a directory that // exists in the absolute root. // This test is added after diff --git a/fstest/run.go b/fstest/run.go index a9b8b0a46..8751b3d23 100644 --- a/fstest/run.go +++ b/fstest/run.go @@ -17,7 +17,7 @@ func TestMkdir(t *testing.T) { // test stuff } -This will make r.Fremote and r.Flocal for a remote remote and a local +This will make r.Fremote and r.Flocal for a remote and a local remote. The remote is determined by the -remote flag passed in. */ diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index 415e167c3..71e1a4dbb 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -158,7 +158,7 @@ func TestCachePin(t *testing.T) { _, err := c.Get("/", create) require.NoError(t, err) - // Pin a non-existent item to show nothing happens + // Pin a nonexistent item to show nothing happens c.Pin("notfound") c.mu.Lock() @@ -312,7 +312,7 @@ func TestCacheRename(t *testing.T) { assert.Equal(t, 2, c.Entries()) - // rename to non-existent + // rename to nonexistent value, found := c.Rename("existing1", "EXISTING1") assert.Equal(t, true, found) assert.Equal(t, existing1, value) @@ -326,7 +326,7 @@ func TestCacheRename(t *testing.T) { assert.Equal(t, 1, c.Entries()) - // rename non-existent + // rename nonexistent value, found = c.Rename("notfound", "NOTFOUND") assert.Equal(t, false, found) assert.Nil(t, value) diff --git a/lib/dircache/dircache.go b/lib/dircache/dircache.go index 920215e08..8076e7c8f 100644 --- a/lib/dircache/dircache.go +++ b/lib/dircache/dircache.go @@ -140,7 
+140,7 @@ func (dc *DirCache) SetRootIDAlias(rootID string) { dc.Put("", dc.rootID) } -// FlushDir flushes the map of all data starting with with the path +// FlushDir flushes the map of all data starting with the path // dir. // // If dir is empty string then this is equivalent to calling ResetRoot diff --git a/lib/jwtutil/jwtutil.go b/lib/jwtutil/jwtutil.go index 583036be9..dfb4dc33b 100644 --- a/lib/jwtutil/jwtutil.go +++ b/lib/jwtutil/jwtutil.go @@ -70,8 +70,8 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara return fmt.Errorf("jwtutil: failed making auth request: %w", err) } defer func() { - deferedErr := resp.Body.Close() - if deferedErr != nil { + deferredErr := resp.Body.Close() + if deferredErr != nil { err = fmt.Errorf("jwtutil: failed to close resp.Body: %w", err) } }() diff --git a/lib/rest/rest.go b/lib/rest/rest.go index 69bb1deff..a424b9d3e 100644 --- a/lib/rest/rest.go +++ b/lib/rest/rest.go @@ -419,7 +419,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte // opts.Body are set then CallJSON will do a multipart upload with a // file attached. opts.MultipartContentName is the name of the // parameter and opts.MultipartFileName is the name of the file. If -// MultpartContentName is set, and request != nil is supplied, then +// MultipartContentName is set, and request != nil is supplied, then // the request will be marshalled into JSON and added to the form with // parameter name MultipartMetadataName. // diff --git a/librclone/librclone/librclone.go b/librclone/librclone/librclone.go index 5b29e4c59..1e7dd6e11 100644 --- a/librclone/librclone/librclone.go +++ b/librclone/librclone/librclone.go @@ -1,7 +1,7 @@ // Package librclone exports shims for library use // // This is the internal implementation which is used for C and -// Gomobile libaries which need slightly different export styles. +// Gomobile libraries which need slightly different export styles. 
// // The shims are a thin wrapper over the rclone RPC. package librclone diff --git a/vfs/dir.go b/vfs/dir.go index 7f6830582..c45033d0e 100644 --- a/vfs/dir.go +++ b/vfs/dir.go @@ -186,7 +186,7 @@ func (d *Dir) ForgetAll() (hasVirtual bool) { } } } - // Purge any unecessary virtual entries + // Purge any unnecessary virtual entries d._purgeVirtual() d.read = time.Time{} diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go index 6755dcee4..bd08736a0 100644 --- a/vfs/vfscache/cache_test.go +++ b/vfs/vfscache/cache_test.go @@ -450,7 +450,7 @@ func TestCachePurgeClean(t *testing.T) { _, err = os.Stat(potato1.c.toOSPath(potato1.name)) require.NoError(t, err) - // Add some potatos + // Add some potatoes potato2 := c.Item("sub/dir/potato2") require.NoError(t, potato2.Open(nil)) require.NoError(t, potato2.Truncate(5)) @@ -603,7 +603,7 @@ func TestCacheRename(t *testing.T) { assertPathNotExist(t, osPathMeta) assert.False(t, c.Exists("sub/newPotato")) - // non-existent file - is ignored + // nonexistent file - is ignored assert.NoError(t, c.Rename("nonexist", "nonexist2", nil)) } diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go index 36355d981..b7f40252d 100644 --- a/vfs/vfscache/item.go +++ b/vfs/vfscache/item.go @@ -1152,7 +1152,7 @@ func (item *Item) _ensure(offset, size int64) (err error) { // This is called by the downloader downloading file segments and the // vfs layer writing to the file. // -// This doesn't mark the item as Dirty - that the the responsibility +// This doesn't mark the item as Dirty - that is the responsibility // of the caller as we don't know here whether we are adding reads or // writes to the cache file.
// diff --git a/vfs/vfscache/writeback/writeback_test.go b/vfs/vfscache/writeback/writeback_test.go index 512d0eeb7..cfc7f4fa9 100644 --- a/vfs/vfscache/writeback/writeback_test.go +++ b/vfs/vfscache/writeback/writeback_test.go @@ -408,7 +408,7 @@ func TestWriteBackAddUpdateNotModified(t *testing.T) { pi2 := newPutItem(t) id2 := wb.Add(id, "one", false, pi2.put) assert.Equal(t, id, id2) - checkNotOnHeap(t, wb, wbItem) // object still being transfered + checkNotOnHeap(t, wb, wbItem) // object still being transferred checkInLookup(t, wb, wbItem) // Because modified was false above this should not cancel the @@ -525,7 +525,7 @@ func TestWriteBackMaxQueue(t *testing.T) { assert.Equal(t, toTransfer-maxTransfers, queued) assert.Equal(t, maxTransfers, inProgress) - // now finish the the first maxTransfers + // now finish the first maxTransfers for i := 0; i < maxTransfers; i++ { pis[i].finish(nil) }