mirror of https://github.com/rclone/rclone (synced 2025-02-18 14:11:27 +01:00)

docs: fix typos found by codespell in docs and code comments

commit b1d4de69c2 (parent 5316acd046)

Changed areas: backend, bin, cmd/bisync, cmd/config, cmd/copyurl, cmd/serve, docs/content, fs, fstest/fstests, vfs
@@ -899,7 +899,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
 
 // Hash returns the MD5 of an object returning a lowercase hex string
 //
-// May make a network request becaue the [fs.List] method does not
+// May make a network request because the [fs.List] method does not
 // return MD5 hashes for DirEntry
 func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
 	if ty != hash.MD5 {
@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// build request
-	// cant use normal rename as file needs to be "activated" first
+	// can't use normal rename as file needs to be "activated" first
 
 	r := api.NewUpdateFileInfo()
 	r.DocumentID = doc.DocumentID
@@ -75,7 +75,7 @@ type MoveFolderParam struct {
 	DestinationPath string `validate:"nonzero" json:"destinationPath"`
 }
 
-// JobIDResponse respresents response struct with JobID for folder operations
+// JobIDResponse represents response struct with JobID for folder operations
 type JobIDResponse struct {
 	JobID string `json:"jobId"`
 }
@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
 	})
 	// Set our own http client in the context
 	ctx = oauthutil.Context(ctx, baseClient)
-	// create a new oauth client, re-use the token source
+	// create a new oauth client, reuse the token source
 	oAuthClient := oauth2.NewClient(ctx, f.ts)
 	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
 }
@@ -3344,7 +3344,7 @@ func setQuirks(opt *Options) {
 	listObjectsV2 = true         // Always use ListObjectsV2 instead of ListObjects
 	virtualHostStyle = true      // Use bucket.provider.com instead of putting the bucket in the URL
 	urlEncodeListings = true     // URL encode the listings to help with control characters
-	useMultipartEtag = true      // Set if Etags for multpart uploads are compatible with AWS
+	useMultipartEtag = true      // Set if Etags for multipart uploads are compatible with AWS
 	useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
 	mightGzip = true             // assume all providers might use content encoding gzip until proven otherwise
 	useAlreadyExists = true      // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -6057,7 +6057,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 			if mOut == nil {
 				err = fserrors.RetryErrorf("internal error: no info from multipart upload")
 			} else if mOut.UploadId == nil {
-				err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
+				err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
 			}
 		}
 		return f.shouldRetry(ctx, err)
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	cmd := exec.Command("git", "log", "--oneline", from+".."+to)
 	out, err := cmd.Output()
 	if err != nil {
-		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	logMap = map[string]string{}
 	logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 		}
 		match := logRe.FindSubmatch(line)
 		if match == nil {
-			log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+			log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 		}
 		var hash, logMessage = string(match[1]), string(match[2])
 		logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
 	flag.Parse()
 	args := flag.Args()
 	if len(args) != 0 {
-		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	// v1.54.0
 	versionBytes, err := os.ReadFile("VERSION")
 	if err != nil {
-		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	if versionBytes[0] == 'v' {
 		versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
 	versionBytes = bytes.TrimSpace(versionBytes)
 	semver := semver.New(string(versionBytes))
 	stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	masterMap, masterLogs := readCommits(stable+".0", "master")
 	stableMap, _ := readCommits(stable+".0", stable+"-stable")
 	for _, logMessage := range masterLogs {
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 	if b.opt.CompareFlag == "" {
 		return nil
 	}
-	var CompareFlag CompareOpt // for exlcusions
+	var CompareFlag CompareOpt // for exclusions
 	opts := strings.Split(b.opt.CompareFlag, ",")
 	for _, opt := range opts {
 		switch strings.ToLower(strings.TrimSpace(opt)) {
@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
 	return "", "", fmt.Errorf("invalid hash %q", str)
 }
 
-// checkListing verifies that listing is not empty (unless resynching)
+// checkListing verifies that listing is not empty (unless resyncing)
 func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
 	if b.opt.Resync || !ls.empty() {
 		return nil
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
 INFO : Bisync successful
 (05) : move-listings empty-path1
 
-(06) : test 2. resync with empty path2, resulting in synching all content to path2.
+(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
 (07) : purge-children {path2/}
 (08) : bisync resync
 INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
cmd/bisync/testdata/test_resync/scenario.txt (vendored, 4 changes)
@@ -1,6 +1,6 @@
 test resync
 # 1. Resync with empty Path1, resulting in copying all content FROM Path2
-# 2. Resync with empty Path2, resulting in synching all content TO Path2
+# 2. Resync with empty Path2, resulting in syncing all content TO Path2
 # 3. Exercise all of the various file difference scenarios during a resync:
 # File         Path1   Path2    Expected action     Who wins
 # - file1.txt  Exists  Missing  Sync Path1 >Path2   Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
 bisync resync
 move-listings empty-path1
 
-test 2. resync with empty path2, resulting in synching all content to path2.
+test 2. resync with empty path2, resulting in syncing all content to path2.
 purge-children {path2/}
 bisync resync
 move-listings empty-path2
@@ -549,12 +549,12 @@ password to re-encrypt the config.
 
 When |--password-command| is called to change the password then the
 environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.
 
 Alternatively you can remove the password first (with |rclone config
 encryption remove|), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
 `, "|", "`"),
 	RunE: func(command *cobra.Command, args []string) error {
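
As an aside on the behaviour documented in this hunk: a `--password-command` program can branch on `RCLONE_PASSWORD_CHANGE` to decide which password to print. A minimal sketch in Go, where the helper name and the two environment variables it reads are hypothetical, not part of rclone:

    // passwdhelper prints the config password for rclone's --password-command.
    // rclone sets RCLONE_PASSWORD_CHANGE=1 when it is asking for the new
    // password during a password change; otherwise it wants the current one.
    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
            fmt.Println(os.Getenv("MY_NEW_CONFIG_PASS")) // hypothetical: where you keep the new password
        } else {
            fmt.Println(os.Getenv("MY_CONFIG_PASS")) // hypothetical: where you keep the current password
        }
    }

Wired up as `--password-command passwdhelper`, the same helper then serves both the normal decryption path and the change-password path.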
@@ -54,7 +54,7 @@ destination if there is one with the same name.
 Setting |--stdout| or making the output file name |-|
 will cause the output to be written to standard output.
 
-### Troublshooting
+### Troubleshooting
 
 If you can't get |rclone copyurl| to work then here are some things you can try:
 
@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
 	return file.Chown(uid, gid)
 }
 
-// Chtimes changes the acces time and modified time
+// Chtimes changes the access time and modified time
 func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
 	defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
 	return f.vfs.Chtimes(name, atime, mtime)
@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
+only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
 You can run rclone with this extra permission by doing this to the
 rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
 
@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
 	}, nil
 }
 
-// GetObject fetchs the object from the filesystem.
+// GetObject fetches the object from the filesystem.
 func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
 	_vfs, err := b.s.getVFS(ctx)
 	if err != nil {
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
 	}
 
 	fp := path.Join(bucketName, objectName)
-	// S3 does not report an error when attemping to delete a key that does not exist, so
+	// S3 does not report an error when attempting to delete a key that does not exist, so
 	// we need to skip IsNotExist errors.
 	if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
 		return err
@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 	for _, entry := range dirEntries {
 		object := entry.Name()
 
-		// workround for control-chars detect
+		// workaround for control-chars detect
 		objectPath := path.Join(fdPath, object)
 
 		if !strings.HasPrefix(object, name) {
@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.
 
 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
|
||||
sync run times for very large numbers of files.
|
||||
|
||||
The check may be run manually with `--check-sync=only`. It runs only the
|
||||
integrity check and terminates without actually synching.
|
||||
integrity check and terminates without actually syncing.
|
||||
|
||||
Note that currently, `--check-sync` **only checks listing snapshots and NOT the
|
||||
actual files on the remotes.** Note also that the listing snapshots will not
|
||||
@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.
|
||||
|
||||
### How to filter directories
|
||||
|
||||
Filtering portions of the directory tree is a critical feature for synching.
|
||||
Filtering portions of the directory tree is a critical feature for syncing.
|
||||
|
||||
Examples of directory trees (always beneath the Path1/Path2 root level)
|
||||
you may want to exclude from your sync:
|
||||
@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.
|
||||
|
||||
## Example exclude-style filters files for use with Dropbox {#exclude-filters}
|
||||
|
||||
- Dropbox disallows synching the listed temporary and configuration/data files.
|
||||
- Dropbox disallows syncing the listed temporary and configuration/data files.
|
||||
The `- <filename>` filters exclude these files where ever they may occur
|
||||
in the sync tree. Consider adding similar exclusions for file types
|
||||
you don't need to sync, such as core dump and software build files.
|
||||
@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
|
||||
|
||||
- `go test . -case basic -remote local -remote2 local`
|
||||
runs the `test_basic` test case using only the local filesystem,
|
||||
synching one local directory with another local directory.
|
||||
syncing one local directory with another local directory.
|
||||
Test script output is to the console, while commands within scenario.txt
|
||||
have their output sent to the `.../workdir/test.log` file,
|
||||
which is finally compared to the golden copy.
|
||||
@ -1860,4 +1860,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
|
||||
* Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
|
||||
to distinguish from `--ignore-checksum`
|
||||
* [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
|
||||
* Documentation and testing improvements
|
||||
* Documentation and testing improvements
|
||||
|
@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.
 
 #### Chunk
 
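The quoted figure can be sanity-checked with a birthday bound. A sketch, assuming crypt's 64 KiB chunks and 192-bit nonces, and pessimistically treating every chunk's nonce as independently random:

    n ≈ 10¹⁸ bytes ÷ 65536 bytes/chunk ≈ 1.5×10¹³ nonces
    P(collision) ≈ n² / (2 × 2¹⁹²) ≈ (1.5×10¹³)² / 1.26×10⁵⁸ ≈ 1.8×10⁻³²

which rounds to the 2×10⁻³² in the text.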
@@ -2930,7 +2930,7 @@ so they take exactly the same form.
 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
 
 Options that can appear multiple times (type `stringArray`) are
-treated slighly differently as environment variables can only be
+treated slightly differently as environment variables can only be
 defined once. In order to allow a simple mechanism for adding one or
 many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
 string. For example
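The example that the docs go on to give lies outside this hunk, but the mechanism works along these lines (hypothetical values): setting the single environment variable `RCLONE_EXCLUDE="*.jpg,*.png"` behaves like passing `--exclude "*.jpg" --exclude "*.png"`, because the value is split as CSV into one item per flag occurrence.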
@@ -384,7 +384,7 @@ Use the gphotosdl proxy for downloading the full resolution images
 The Google API will deliver images and video which aren't full
 resolution, and/or have EXIF data missing.
 
-However if you ue the gphotosdl proxy tnen you can download original,
+However if you use the gphotosdl proxy then you can download original,
 unchanged images.
 
 This runs a headless browser in the background.
@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
 	],
 }
 
-The `expiry` time is the time until the file is elegible for being
+The `expiry` time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers `--transfers` files at once, only the lowest
 `--transfers` expiry times will have `uploading` as `true`. So there
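For orientation, the `],` and `}` context lines above are the tail of a JSON result whose overall shape is roughly the following, a sketch reconstructed from context with made-up values and only the fields this passage mentions:

    {
        "queued": [
            {
                "name": "file.txt",
                "expiry": 1.5,
                "uploading": false
            }
        ]
    }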
@@ -750,7 +750,7 @@ Notes on above:
    that `USER_NAME` has been created.
 2. The Resource entry must include both resource ARNs, as one implies
    the bucket and the other implies the bucket's objects.
-3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
 
 For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
 that will generate one or more buckets that will work with `rclone sync`.
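As an illustration of note 2, the Resource entry in such a policy pairs the bucket ARN with the matching objects ARN (standard AWS form, with `BUCKET_NAME` as in the surrounding docs):

    "Resource": [
        "arn:aws:s3:::BUCKET_NAME",
        "arn:aws:s3:::BUCKET_NAME/*"
    ]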
@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
 			assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
 		}
 
-		// Write some test data to fullfil the request
+		// Write some test data to fulfill the request
 		w.Header().Set("Content-Type", "text/plain")
 		_, _ = fmt.Fprintln(w, "test data")
 	}))
@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
 	return LogValueItem{key: key, value: value, render: false}
 }
 
-// String returns the representation of value. If render is fals this
+// String returns the representation of value. If render is false this
 // is an empty string so LogValueItem entries won't show in the
 // textual representation of logs.
 func (j LogValueItem) String() string {
|
@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// This re-uses the internal buffer if at all possible.
|
||||
// This reuses the internal buffer if at all possible.
|
||||
func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size == 0 {
|
||||
|
@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
 	assert.NoError(t, err)
 	checkContent(o, "Rutabaga")
 	assert.Equal(t, newNow, o.ModTime(context.Background()))
-	assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
+	assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer
 
 	// not within the buffer
 	newStr := "0123456789"
@@ -2391,7 +2391,7 @@ func Run(t *testing.T, opt *Opt) {
 	var itemCopy = item
 	itemCopy.Path += ".copy"
 
-	// Set copy cutoff to mininum value so we make chunks
+	// Set copy cutoff to minimum value so we make chunks
 	origCutoff, err := do.SetCopyCutoff(minChunkSize)
 	require.NoError(t, err)
 	defer func() {
@@ -464,7 +464,7 @@ the |--vfs-cache-mode| is off, it will return an empty result.
 	],
 }
 
-The |expiry| time is the time until the file is elegible for being
+The |expiry| time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers |--transfers| files at once, only the lowest
 |--transfers| expiry times will have |uploading| as |true|. So there
@@ -216,7 +216,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
 	configName := fs.ConfigString(f)
 	for _, activeVFS := range active[configName] {
 		if vfs.Opt == activeVFS.Opt {
-			fs.Debugf(f, "Re-using VFS from active cache")
+			fs.Debugf(f, "Reusing VFS from active cache")
 			activeVFS.inUse.Add(1)
 			return activeVFS
 		}
@@ -428,7 +428,7 @@ func TestItemReloadCacheStale(t *testing.T) {
 	assert.Equal(t, int64(110), fi.Size())
 
 	// Write to the file to make it dirty
-	// This checks we aren't re-using stale data
+	// This checks we aren't reusing stale data
 	n, err := item.WriteAt([]byte("HELLO"), 0)
 	require.NoError(t, err)
 	assert.Equal(t, 5, n)