
Merge branch 'master' of github.com:rclone/rclone into gh-actions-code-ql

Aaron Gokaslan 2024-08-18 11:45:54 -07:00
commit 1da511b486
923 changed files with 195499 additions and 87470 deletions

.gitattributes

@ -1,3 +1,7 @@
# Go writes go.mod and go.sum with lf even on windows
go.mod text eol=lf
go.sum text eol=lf
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true

.github/workflows/build.yml

@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.22.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.22.0-rc.1'
go: '>=1.23.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '>=1.22.0-rc.1'
os: macos-latest
go: '>=1.23.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@ -58,15 +58,15 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-11
go: '>=1.22.0-rc.1'
os: macos-latest
go: '>=1.23.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.22.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@ -76,23 +76,23 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.22.0-rc.1'
go: '>=1.23.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true
- job_name: go1.21
os: ubuntu-latest
go: '1.21'
quicktest: true
racequicktest: true
- job_name: go1.22
os: ubuntu-latest
go: '1.22'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
@ -124,7 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@ -137,7 +137,8 @@ jobs:
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
brew install git-annex git-annex-remote-rclone
if: matrix.os == 'macos-latest'
- name: Install Libraries on Windows
shell: powershell
@ -167,14 +168,6 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
@ -230,21 +223,71 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v4
- name: Code quality test
uses: golangci/golangci-lint-action@v4
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '>=1.22.0-rc.1'
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
@ -268,15 +311,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.22.0-rc.1'
- name: Go module cache
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
go-version: '>=1.23.0-rc.1'
- name: Set global environment variables
shell: bash

.github/workflows/build_publish_docker_image.yml

@ -56,7 +56,7 @@ jobs:
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .

.github/workflows/notify.yml (new file)

@ -0,0 +1,15 @@
name: Notify users based on issue labels
on:
issues:
types: [labeled]
jobs:
notify:
runs-on: ubuntu-latest
steps:
- uses: jenschelkopf/issue-label-notification-action@1.3
with:
token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
recipients: |
Support Contract=@rclone/support

.gitignore

@ -3,10 +3,13 @@ _junk/
rclone
rclone.exe
build
docs/public
/docs/public/
/docs/.hugo_build.lock
/docs/static/img/logos/
rclone.iml
.idea
.history
.vscode
*.test
*.iml
fuzz-build.zip
@ -15,6 +18,5 @@ fuzz-build.zip
Thumbs.db
__pycache__
.DS_Store
/docs/static/img/logos/
resource_windows_*.syso
.devcontainer

.golangci.yml

@ -13,6 +13,7 @@ linters:
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
@ -98,3 +99,11 @@ linters-settings:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
disabled-checks:
- appendAssign
- captLocal
- commentFormatting
- exitAfterDefer
- ifElseChain
- singleCaseSwitch

CONTRIBUTING.md

@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
test_all -backends drive
### Full integration testing
@ -508,7 +508,7 @@ You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from genric config questions (eg `region` and `endpoint).
- Exclude your provider from generic config questions (eg `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.

MAINTAINERS.md

@ -21,6 +21,8 @@ Current active maintainers of rclone are:
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
**This is a work in progress draft**

MANUAL.html (generated)

File diff suppressed because it is too large

MANUAL.md (generated)

File diff suppressed because it is too large

MANUAL.txt (generated)

File diff suppressed because it is too large

Makefile

@ -36,13 +36,14 @@ ifdef BETA_SUBDIR
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
.PHONY: rclone test_all vars version
@ -50,7 +51,7 @@ rclone:
ifeq ($(GO_OS),windows)
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
rm resource_windows_`go env GOARCH`.syso
endif
@ -59,7 +60,7 @@ endif
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@ -87,13 +88,13 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@ -167,7 +168,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
rclone -v sync docs/public www.rclone.org:
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
@ -194,8 +195,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
rclone -P copy build/ downloads.rclone.org:/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
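
The `xargs` one-liner above re-uploads each versioned artifact under a `-current-` alias. A standalone sketch of the same rename, using a hypothetical file name:

```
i="rclone-v1.68.0-linux-amd64.zip"
# replace the -vX.Y.Z- segment with -current-
[[ $i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j="${BASH_REMATCH[1]}-current-${BASH_REMATCH[3]}"
echo "$j"  # rclone-current-linux-amd64.zip
```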
upload_github:
./bin/upload-github $(TAG)
@ -205,7 +206,7 @@ cross: doc
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
rclone -v copy build/ pub.rclone.org:/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
@ -218,18 +219,18 @@ ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@ -238,7 +239,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w --disableFastRender
cd docs && hugo server --logLevel info -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

README.md

@ -1,7 +1,23 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[<img src="https://rclone.org/img/logos/warp-github-light.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-light-mode-only)
[<img src="https://rclone.org/img/logos/warp-github-dark.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@ -39,7 +55,9 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@ -57,6 +75,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@ -76,6 +95,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@ -94,6 +114,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

RELEASE.md

@ -37,18 +37,44 @@ This file describes how to make the various kinds of releases
## Update dependencies
Early in the next release cycle update the dependencies
Early in the next release cycle update the dependencies.
* Review any pinned packages in go.mod and remove if possible
* make updatedirect
* make GOTAGS=cmount
* make compiletest
* git commit -a -v
* make update
* make GOTAGS=cmount
* make compiletest
* `make updatedirect`
* `make GOTAGS=cmount`
* `make compiletest`
* Fix anything which doesn't compile at this point and commit changes here
* `git commit -a -v -m "build: update all dependencies"`
If `make updatedirect` upgrades the version of go in the `go.mod`,
then go to manual mode. `go1.20` here is the lowest supported version
in the `go.mod`.
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.20 -compat=1.20
```
If the `go mod tidy` fails, use its output to remove the packages
which can't be upgraded from `/tmp/potential-upgrades`, then restore
the module files
```
git co go.mod go.sum
```
And try again.
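
As a minimal sketch, assuming a hypothetical module
`github.com/example/stuck` was the one `go mod tidy` complained about:

```
grep -v '^github.com/example/stuck$' /tmp/potential-upgrades > /tmp/retry-upgrades
go get -d $(cat /tmp/retry-upgrades)
go mod tidy -go=1.20 -compat=1.20
```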
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time-consuming to fix.
* `make update`
* `make GOTAGS=cmount`
* `make compiletest`
* roll back any updates which didn't compile
* git commit -a -v --amend
* `git commit -a -v --amend`
* **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
@ -57,6 +83,9 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
Once it compiles locally, push it to a test branch and commit fixes
until the tests pass.
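
A minimal sketch of that loop, with a hypothetical branch name:

```
git checkout -b update-deps
git push -u origin update-deps
# wait for CI, then for each failure:
git commit -a -m "build: fix failures after dependency update"
git push
```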
## Tidy beta
At some point after the release run

VERSION

@ -1 +1 @@
v1.66.0
v1.68.0

backend/alias/alias_internal_test.go

@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
configfile.Install()
// Configure the remote
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
config.FileSetValue(remoteName, "type", "alias")
config.FileSetValue(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {
@ -81,10 +81,12 @@ func TestNewFS(t *testing.T) {
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
if !isDir {
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
_, isDir := gotEntry.(fs.Directory)
}
require.Equal(t, wantEntry.isDir, isDir, what)
}
}

backend/all/all.go

@ -17,7 +17,9 @@ import (
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
@ -39,6 +41,7 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
@ -53,6 +56,7 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

backend/azureblob/azureblob.go

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@ -712,10 +711,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ClientOptions: policyClientOptions,
}
// Here we auth by setting one of cred, sharedKeyCred or f.svc
// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
var (
cred azcore.TokenCredential
sharedKeyCred *service.SharedKeyCredential
anonymous = false
)
switch {
case opt.EnvAuth:
@ -875,6 +875,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.Account != "":
// Anonymous access
anonymous = true
default:
return nil, errors.New("no authentication method configured")
}
@ -904,6 +907,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("create client failed: %w", err)
}
} else if anonymous {
// Anonymous public access
f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
if err != nil {
return nil, fmt.Errorf("create public client failed: %w", err)
}
}
}
if f.svc == nil {
@ -1089,7 +1098,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
if isDirectory {
// Don't insert the root directory
if remote == directory {
if remote == f.opt.Enc.ToStandardPath(directory) {
continue
}
// process directory markers as directories
@ -2085,7 +2094,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return 0, nil
}
md5sum := m.Sum(nil)
transactionalMD5 := md5sum[:]
// increment the blockID and save the blocks for finalize
var binaryBlockID [8]byte // block counter as LSB first 8 bytes
@ -2108,7 +2116,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
}
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
if err != nil {

backend/azureblob/azureblob_internal_test.go

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob

backend/azureblob/azureblob_test.go

@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob

backend/azureblob/azureblob_unsupported.go

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

backend/azurefiles/azurefiles.go

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles
@ -1036,14 +1035,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
return fmt.Errorf("update: unable to create file: %w", createErr)
}
} else {
} else if size != o.Size() {
// Resize the file if needed
if size != o.Size() {
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
}
}
}
// Measure the size if it is unknown
if sizeUnknown {

backend/azurefiles/azurefiles_internal_test.go

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles

backend/azurefiles/azurefiles_test.go

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles

backend/azurefiles/azurefiles_unsupported.go

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

backend/b2/api/types_test.go

@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
}

backend/b2/b2.go

@ -60,6 +60,7 @@ const (
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
defaultMaxAge = 24 * time.Hour
)
// Globals
@ -101,7 +102,7 @@ below will cause b2 to return specific errors:
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
@ -243,7 +244,7 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// See: https://www.backblaze.com/b2/docs/files.html
// See: https://www.backblaze.com/docs/cloud-storage-files
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
@ -305,6 +306,7 @@ type Object struct {
sha1 string // SHA-1 hash if known
size int64 // Size of the object
mimeType string // Content-Type of the object
meta map[string]string // The object metadata if known - may be nil - with lower case keys
}
// ------------------------------------------------------------
@ -362,7 +364,7 @@ var retryErrorCodes = []int{
504, // Gateway Time-out
}
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
@ -1248,7 +1250,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
bucket, directory := f.split(dir)
if bucket == "" {
return errors.New("can't purge from root")
@ -1266,7 +1268,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
return time.Since(time.Time(timestamp)).Hours() > 24
return time.Since(time.Time(timestamp)) > maxAge
}
// Delete Config.Transfers in parallel
@ -1289,6 +1291,21 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}()
}
if oldOnly {
if deleteHidden && deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
} else if deleteHidden {
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
} else if deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
} else {
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
return nil
}
} else {
fs.Infof(f, "cleaning bucket %q of all files", bucket)
}
last := ""
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
@ -1299,14 +1316,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
@ -1328,12 +1345,17 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
return f.purge(ctx, dir, false, false, false, defaultMaxAge)
}
// CleanUp deletes all the hidden files.
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
return f.purge(ctx, "", true, true, true, defaultMaxAge)
}
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
}
// copy does a server-side copy from dstObj <- srcObj
@ -1545,7 +1567,7 @@ func (o *Object) Size() int64 {
//
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string {
const unverified = "unverified:"
@ -1572,7 +1594,14 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
return o.parseTimeString(Info[timeKey])
err = o.parseTimeString(Info[timeKey])
if err != nil {
return err
}
// For now, just set "mtime" in metadata
o.meta = make(map[string]string, 1)
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
return nil
}
// decodeMetaData sets the metadata in the object from an api.File
@ -1674,6 +1703,16 @@ func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}
// parseTimeStringHelper converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time
func parseTimeStringHelper(timeString string) (time.Time, error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
}
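
For instance, a quick check of the helper above: the string `"1370000000000"` is 1370000000 seconds after the Unix epoch, so under this conversion (a hypothetical call, not part of the diff):

```go
t, err := parseTimeStringHelper("1370000000000")
// t == 2013-05-31 11:33:20 +0000 UTC, err == nil
```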
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
@ -1681,12 +1720,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
modTime, err := parseTimeStringHelper(timeString)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil
}
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
o.modTime = modTime
return nil
}
@ -1763,14 +1802,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
}
return nil
@ -1840,6 +1879,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// Embryonic metadata support - just mtime
o.meta = make(map[string]string, 1)
modTime, err := parseTimeStringHelper(info.Info[timeKey])
if err == nil {
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
@ -1937,7 +1984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
if err != nil {
o.fs.putRW(rw)
return err
@ -1969,7 +2016,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(up.info)
}
modTime := src.ModTime(ctx)
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return err
}
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" {
@ -2074,6 +2124,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
// When metadata support is added to b2, this method will need a more generic name
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
}
// merge metadata into request and user metadata
for k, v := range meta {
k = strings.ToLower(k)
// For now, the only metadata we're concerned with is "mtime"
switch k {
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
default:
// Do nothing for now
}
}
return modTime, nil
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
@ -2105,7 +2185,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}
@ -2243,8 +2323,56 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
return bucket.LifecycleRules, nil
}
var cleanupHelp = fs.CommandHelp{
Name: "cleanup",
Short: "Remove unfinished large file uploads.",
Long: `This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
}
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, false, true, maxAge)
}
var cleanupHiddenHelp = fs.CommandHelp{
Name: "cleanup-hidden",
Short: "Remove old versions of files.",
Long: `This command removes any old hidden versions of files.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
cleanupHelp,
cleanupHiddenHelp,
}
// Command the backend to run a named command
@ -2260,6 +2388,10 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
case "cleanup":
return f.cleanupCommand(ctx, name, arg, opt)
case "cleanup-hidden":
return f.cleanupHiddenCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}

backend/b2/b2_internal_test.go

@ -1,15 +1,29 @@
package b2
import (
"context"
"crypto/sha1"
"fmt"
"path"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
var encodeTest = []struct {
fullyEncoded string
@ -170,9 +184,303 @@ func TestParseTimeString(t *testing.T) {
}
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()
ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))
contents := fstest.Gz(t, original)
mimeType := "text/html"
if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}
if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any more precision
"mtime": "2009-05-06T04:05:06.499Z",
}
// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}
// mtime
for k, v := range metadata {
got := o.meta[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")
// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
// b2 NewObject doesn't work with VersionAt
//t.Run("NewObject", func(t *testing.T) {
// gotObj, gotErr := f.NewObject(ctx, fileName)
// assert.Equal(t, test.wantErr, gotErr)
// if gotErr == nil {
// assert.Equal(t, test.wantSize, gotObj.Size())
// }
//})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})
// Purge gets tested later
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
// Internal tests go here
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
}
var _ fstests.InternalTester = (*Fs)(nil)

backend/b2/upload.go

@ -1,6 +1,6 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
package b2
@ -91,7 +91,7 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
size := src.Size()
parts := 0
chunkSize := defaultChunkSize
@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
if err != nil {
@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
optionsToSend := make([]fs.OpenOption, 0, len(options))
if newInfo == nil {
modTime := src.ModTime(ctx)
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return nil, err
}
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
timeKey: timeString(modTime),
}
// Custom upload headers - remove header prefix since they are sent in the body
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
request.Info[k[len(headerPrefix):]] = v
} else {
optionsToSend = append(optionsToSend, option)
}
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
Options: optionsToSend,
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)

backend/cache/cache.go

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
@ -410,8 +409,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
@ -424,7 +422,6 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
}
}
}
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath

backend/cache/cache_internal_test.go

@ -1,5 +1,4 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@ -30,10 +29,11 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/require"
)
@ -123,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 30
vfscommon.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
vfscommon.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@ -338,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@ -368,7 +368,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@ -417,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
expectedSize++ // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@ -708,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@ -743,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}
func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@ -850,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
if s == remote {
for _, s := range config.GetRemotes() {
if s.Name == remote {
remoteExists = true
}
}
@ -875,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
config.FileSetValue(localRemote, "type", "local")
config.FileSetValue(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type")
remoteType := config.GetValue(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@ -891,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote")
remoteRemote := config.GetValue(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type")
remoteType := config.GetValue(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@ -935,8 +935,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background(), "")
require.NoError(t, err)
_ = operations.Purge(context.Background(), f, "")
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
@ -949,7 +948,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := f.Features().Purge(context.Background(), "")
err := operations.Purge(context.Background(), f, "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@ -1193,7 +1192,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
size = size - 32
size -= 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)
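// Worked example for the arithmetic above (assuming the crypt layout of a
// 32-byte file header plus 16 bytes of overhead per 64 KiB block): a
// ciphertext holding k full blocks plus a final partial block of r plaintext
// bytes has size 32 + k*(65536+16) + (r+16), so cleanSize recovers
// k*65536 + r.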

View File

@ -1,7 +1,6 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
@ -18,8 +17,9 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}

View File

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package cache implements a virtual provider to cache existing remotes.
package cache

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@ -119,7 +118,7 @@ func (r *Handle) startReadWorkers() {
r.scaleWorkers(totalWorkers)
}
// scaleOutWorkers will increase the worker pool count by the provided amount
// scaleWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
if current == desired {
@ -209,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
chunkStart -= offset
r.queueOffset(chunkStart)
found := false
@ -328,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@ -416,11 +415,9 @@ func (w *worker) run() {
continue
}
}
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
// TODO: Remove this comment if it proves to be reliable for #1896

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@ -1,3 +1,6 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import bolt "go.etcd.io/bbolt"

View File

@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
)
// Chunker's composite files have one or more chunks
@ -101,8 +102,10 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
const (
maxMetadataSize = 1023
maxMetadataSizeWritten = 255
)
// Current/highest supported metadata format.
const metadataVersion = 2
@ -305,7 +308,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@ -317,13 +319,15 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// i.e. `rpath` does not exist in the wrapped remote, but chunker
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
if err == nil && !f.useMeta {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := cache.Get(ctx, baseName+firstChunkPath)
newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
f.base = newBase
err = testErr
}
}
cache.PinUntilFinalized(f.base, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
@ -345,6 +349,11 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
@ -821,8 +830,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
}
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirCopy(ctx, entry)
wrapDir.SetRemote(entry.Remote())
wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
@ -955,6 +963,11 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
if sameMain && f.base.Features().IsLocal {
// on local, make sure the EqualFold still holds true when accounting for encoding.
// sometimes paths with special characters will only normalize the same way in Standard Encoding.
sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
}
} else {
sameMain = mainRemote == remote
}
@ -974,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
}
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
if o.main == nil && len(o.chunks) == 0 {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
@ -1130,8 +1143,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
basePut putFn, action string, target fs.Object,
) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
@ -1571,6 +1584,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.base.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.base.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
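// The wrapping backends in this commit all use the optional-interface
// delegation seen here: advertise MkdirMetadata only when the wrapped Fs
// implements it, otherwise return fs.ErrorNotImplemented. A hedged
// caller-side sketch (the metadata key is illustrative only):
//
// if do := f.Features().MkdirMetadata; do != nil {
// dir, err := do(ctx, "path/to/dir", fs.Metadata{"mtime": "2006-01-02T15:04:05Z"})
// if err != nil {
// return err
// }
// fs.Debugf(dir, "created directory with metadata")
// }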
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@ -1888,6 +1909,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.base, srcRemote, dstRemote)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.base.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@ -2548,6 +2577,8 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)

View File

@ -36,6 +36,7 @@ func TestIntegration(t *testing.T) {
"GetTier",
"SetTier",
"Metadata",
"SetMetadata",
},
UnimplementableFsMethods: []string{
"PublicLink",

View File

@ -233,6 +233,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
@ -440,6 +445,32 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return u.f.Mkdir(ctx, uRemote)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
do := u.f.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, uRemote, metadata)
if err != nil {
return nil, err
}
entries := fs.DirEntries{newDir}
entries, err = u.wrapEntries(ctx, entries)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
@ -755,12 +786,11 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
newPath, err := u.pathAdjustment.do(x.Remote())
if err != nil {
return nil, err
}
newDir.SetRemote(newPath)
newDir := fs.NewDirWrapper(newPath, x)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
@ -783,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewDir(combineDir, f.when)
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return entries, nil
@ -965,6 +995,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return do(ctx, uDirs)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
u, uDir, err := f.findUpstream(dir)
if err != nil {
return err
}
if uDir == "" {
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
return nil
}
if do := u.f.Features().DirSetModTime; do != nil {
return do(ctx, uDir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@ -1073,6 +1119,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
@ -1099,6 +1156,8 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)

View File

@ -38,6 +38,7 @@ import (
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading
bufferSize = 8388608
heuristicBytes = 1048576
@ -194,6 +195,11 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
@ -450,7 +456,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
}
return nil
}
@ -784,6 +790,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@ -927,6 +941,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@ -1265,6 +1287,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@ -1330,7 +1363,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
// Get file handle
var file io.Reader
if offset != 0 {
@ -1497,6 +1530,8 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)

View File

@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext {
dir += int(runeValue)
}
dir = dir % 256
dir %= 256
// We'll use this number to store in the result filename...
var result bytes.Buffer
@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 {
pos -= 6
}
pos = pos - thisdir
pos -= thisdir
if pos < 0 {
pos += 52
}
@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
fh.buf[i] = 0
}
}
fh.bufIndex = 0

View File

@ -130,6 +130,16 @@ trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "strict_names",
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
(By default, rclone will just log a NOTICE and continue as normal.)
This can happen if encrypted and unencrypted files are stored in the same
directory (which is not recommended). It may also indicate a more serious
problem that should be investigated.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@ -275,6 +285,11 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
@ -294,6 +309,7 @@ type Options struct {
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
StrictNames bool `config:"strict_names"`
}
// Fs represents a wrapped fs.Fs
@ -328,45 +344,64 @@ func (f *Fs) String() string {
}
// Encrypt an object file name to entries.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
return nil
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
return nil
}
// Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
return nil
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
return nil
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
errors := 0
var firsterr error
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
err = f.add(&newEntries, x)
case fs.Directory:
f.addDir(ctx, &newEntries, x)
err = f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
}
if err != nil {
errors++
if firsterr == nil {
firsterr = err
}
}
}
if firsterr != nil {
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
}
return newEntries, nil
}
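With strict_names set (exposed as --crypt-strict-names under rclone's usual
backend flag naming), the aggregated error above reaches callers instead of
bad entries being silently skipped. A minimal caller-side sketch, assuming a
crypt remote over a directory that also contains plaintext names:

entries, err := f.List(ctx, "mixed")
if err != nil {
// e.g. "there were 2 undecryptable name errors. first error: ..."
return err
}
_ = entries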
@ -485,7 +520,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@ -520,6 +555,37 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
do := f.Fs.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
if err != nil {
return nil, err
}
var entries = make(fs.DirEntries, 0, 1)
err = f.addDir(ctx, &entries, newDir)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
do := f.Fs.Features().DirSetModTime
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@ -761,7 +827,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
}
return do(ctx, out)
}
@ -997,14 +1063,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
remote = decryptedRemote
}
newDir := fs.NewDirWrapper(remote, dir)
return newDir
}
@ -1182,6 +1248,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
@ -1207,6 +1284,8 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)

View File

@ -151,6 +151,7 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
{Bit: uint64(rwOff), Name: "off"},
{Bit: uint64(rwRead), Name: "read"},
{Bit: uint64(rwWrite), Name: "write"},
{Bit: uint64(rwFailOK), Name: "failok"},
}
}
@ -160,6 +161,7 @@ type rwChoice = fs.Bits[rwChoices]
const (
rwRead rwChoice = 1 << iota
rwWrite
rwFailOK
rwOff rwChoice = 0
)
@ -173,6 +175,9 @@ var rwExamples = fs.OptionExamples{{
}, {
Value: rwWrite.String(),
Help: "Write the value only",
}, {
Value: rwFailOK.String(),
Help: "If writing fails log errors only, don't fail the transfer",
}, {
Value: (rwRead | rwWrite).String(),
Help: "Read and Write the value.",
@ -287,7 +292,10 @@ func init() {
},
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored in the properties field of the drive object.`,
Help: `User metadata is stored in the properties field of the drive object.
Metadata is supported on files and directories.
`,
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@ -870,6 +878,11 @@ type Object struct {
v2Download bool // generate v2 download link ondemand
}
// Directory describes a drive directory
type Directory struct {
baseObject
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
@ -1383,6 +1396,11 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: false, // FIXME need to check!
}).Fill(ctx, f)
// Create a new authorized Drive client.
@ -1729,26 +1747,72 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return pathIDOut, found, err
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
// createDir makes a directory with pathID as parent and name leaf with optional metadata
func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Metadata) (info *drive.File, err error) {
leaf = f.opt.Enc.FromStandardName(leaf)
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
pathID = actualID(pathID)
createInfo := &drive.File{
Name: leaf,
Description: leaf,
MimeType: driveFolderType,
Parents: []string{pathID},
}
var info *drive.File
var updateMetadata updateMetadataFn
if len(metadata) > 0 {
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
}
}
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
Fields("id").
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
if updateMetadata != nil {
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
}
return info, nil
}
// updateDir updates an existing a directory with the metadata passed in
func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata) (info *drive.File, err error) {
if len(metadata) == 0 {
return f.getFile(ctx, dirID, f.getFileFields(ctx))
}
dirID = actualID(dirID)
updateInfo := &drive.File{}
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
}
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(dirID, updateInfo).
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
return info, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
info, err := f.createDir(ctx, pathID, leaf, nil)
if err != nil {
return "", err
}
@ -1859,7 +1923,7 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
return "", "", isDocument
}
// findExportFormatByMimeType works out the optimum export settings
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
@ -2155,13 +2219,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case in <- job:
default:
overflow = append(overflow, job)
wg.Add(-1)
wg.Done()
}
}
// Send the entry to the caller, queueing any directories as new jobs
cb := func(entry fs.DirEntry) error {
if d, isDir := entry.(*fs.Dir); isDir {
if d, isDir := entry.(fs.Directory); isDir {
job := listREntry{actualID(d.ID()), d.Remote()}
sendJob(job)
}
@ -2338,11 +2402,11 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 {
d.SetParentID(item.Parents[0])
baseObject, err := f.newBaseObject(ctx, remote, item)
if err != nil {
return nil, err
}
d := &Directory{baseObject: baseObject}
return d, nil
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
@ -2370,7 +2434,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
Name: leaf,
Description: leaf,
Parents: []string{directoryID},
ModifiedTime: modTime.Format(timeFormatOut),
}
@ -2535,6 +2598,59 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
var info *drive.File
dirID, err := f.dirCache.FindDir(ctx, dir, false)
if err == fs.ErrorDirNotFound {
// Directory does not exist so create it
var leaf, parentID string
leaf, parentID, err = f.dirCache.FindPath(ctx, dir, true)
if err != nil {
return nil, err
}
info, err = f.createDir(ctx, parentID, leaf, metadata)
} else if err == nil {
// Directory exists and needs updating
info, err = f.updateDir(ctx, dirID, metadata)
}
if err != nil {
return nil, err
}
// Convert the info into a directory entry
entry, err := f.itemToDirEntry(ctx, dir, info)
if err != nil {
return nil, err
}
dirEntry, ok := entry.(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
}
return dirEntry, nil
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
dirID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
o := baseObject{
fs: f,
remote: dir,
id: dirID,
}
return o.SetModTime(ctx, modTime)
}
// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
@ -2678,6 +2794,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
// Adjust metadata if required
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), createInfo, false)
if err != nil {
return nil, err
}
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
@ -2691,7 +2813,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
copy := f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
@ -2711,7 +2833,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// FIXME remove this when google fixes the problem!
if isDoc {
// A short sleep is needed here in order to make the
// change effective, without it is is ignored. This is
// change effective, without it is ignored. This is
// probably some eventual consistency nastiness.
sleepTime := 2 * time.Second
fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)
@ -2727,6 +2849,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
// Finalise metadata
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
return newObject, nil
}
@ -2900,13 +3027,19 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstParents := strings.Join(dstInfo.Parents, ",")
dstInfo.Parents = nil
// Adjust metadata if required
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstInfo, true)
if err != nil {
return nil, err
}
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
@ -2915,6 +3048,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// Finalise metadata
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
}
@ -3420,6 +3558,50 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return nil
}
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
list := f.svc.Files.List()
if query != "" {
list.Q(query)
}
if f.opt.ListChunk > 0 {
list.PageSize(f.opt.ListChunk)
}
list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive && !f.opt.SharedWithMe {
list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive")
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
var results []*drive.File
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("failed to execute query: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
results = append(results, files.Files...)
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return results, nil
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@ -3570,6 +3752,47 @@ Use the --interactive/-i or --dry-run flag to see what would be copied before co
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
}, {
Name: "query",
Short: "List files using Google Drive query language",
Long: `This command lists files based on a query
Usage:
rclone backend query drive: query
The query syntax is documented at [Google Drive Search query terms and
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
For example:
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
If the query contains literal ' or \ characters, these need to be escaped with
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
file named "foo ' \.txt":
rclone backend query drive: "name = 'foo \' \\\.txt'"
The result is a JSON array of matches, for example:
[
{
"createdTime": "2017-06-29T19:58:28.537Z",
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
"mimeType": "text/plain",
"modifiedTime": "2024-02-02T10:40:02.874Z",
"name": "foo ' \\.txt",
"parents": [
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
],
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
"size": "311",
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
}
]`,
}}
// Command the backend to run a named command
@ -3687,6 +3910,17 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
case "query":
if len(arg) != 1 {
return nil, errors.New("need a query argument")
}
query := arg[0]
results, err := f.query(ctx, query)
if err != nil {
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
}
return results, nil
default:
return nil, fs.ErrorCommandNotFound
}
@ -3731,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
return "", hash.ErrUnsupported
}
return "", nil
@ -4193,6 +4427,37 @@ func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
func (d *Directory) Items() int64 {
return -1
}
// SetMetadata sets metadata for a Directory
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
info, err := d.fs.updateDir(ctx, d.id, metadata)
if err != nil {
return fmt.Errorf("failed to update directory info: %w", err)
}
// Update directory from info returned
baseObject, err := d.fs.newBaseObject(ctx, d.remote, info)
if err != nil {
return fmt.Errorf("failed to process directory info: %w", err)
}
d.baseObject = baseObject
return err
}
// Hash does nothing on a directory
//
// This method is implemented with the incorrect type signature to
// stop the Directory type asserting to fs.Object or fs.ObjectInfo
func (d *Directory) Hash() {
// Does nothing
}
// templates for document link files
const (
urlTemplate = `[InternetShortcut]{{"\r"}}
@ -4242,6 +4507,8 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
@ -4256,4 +4523,8 @@ var (
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
_ fs.ParentIDer = (*linkObject)(nil)
_ fs.Directory = (*Directory)(nil)
_ fs.SetModTimer = (*Directory)(nil)
_ fs.SetMetadataer = (*Directory)(nil)
_ fs.ParentIDer = (*Directory)(nil)
)

View File

@ -524,12 +524,49 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
func (f *Fs) InternalTestQuery(t *testing.T) {
ctx := context.Background()
var err error
t.Run("BadQuery", func(t *testing.T) {
_, err = f.query(ctx, "this is a bad query")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to execute query")
})
t.Run("NoMatch", func(t *testing.T) {
results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
require.NoError(t, err)
assert.Len(t, results, 0)
})
t.Run("GoodQuery", func(t *testing.T) {
pathSegments := strings.Split(existingFile, "/")
var parent string
for _, item := range pathSegments {
// the file name contains ' characters which must be escaped
escapedItem := f.opt.Enc.FromStandardName(item)
escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
require.NoError(t, err)
require.True(t, len(results) > 0)
for _, result := range results {
assert.True(t, len(result.Id) > 0)
assert.Equal(t, result.Name, item)
}
parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
}
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
opt := &filter.Options{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
flt, err := filter.NewFilter(opt)
@ -611,6 +648,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}

View File

@ -9,6 +9,8 @@ import (
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
@ -37,7 +39,7 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
Type: "boolean",
Example: "false",
},
@ -135,23 +137,30 @@ func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, use
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
errs := errcount.New()
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, perm).
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
SendNotificationEmail(false).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set permission: %w", err)
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
errs.Add(err)
}
}
return nil
err = errs.Err("failed to set permission")
if err != nil {
err = fserrors.NoRetryError(err)
}
return err
}
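// setPermissions now follows the lib/errcount pattern imported above:
// collect each item's error, log it, and summarize at the end. A hedged
// sketch using only the calls visible in this diff:
//
// errs := errcount.New()
// for _, p := range perms {
// if err := apply(p); err != nil {
// errs.Add(err)
// }
// }
// err := errs.Err("failed to set permission") // assumed nil when no errors were added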
// Clean attributes from permissions which we can't write
@ -253,7 +262,7 @@ func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.La
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
return fmt.Errorf("failed to set labels: %w", err)
}
return nil
}
@ -363,6 +372,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
info.Permissions = nil
info.PermissionIds = nil
}
@ -527,9 +537,13 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
return nil, err
}
case "writers-can-share":
if !f.isTeamDrive {
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
} else {
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
}
case "viewed-by-me":
// Can't write this
case "content-type":
@ -540,7 +554,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// Can't set Owner on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setOwner(ctx, info, v)
err := f.setOwner(ctx, info, v)
if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "permissions":
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
@ -553,7 +572,13 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// Can't set Permissions on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setPermissions(ctx, info, perms)
err := f.setPermissions(ctx, info, perms)
if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
// We've already logged the permissions errors individually here
fs.Debugf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "labels":
if !f.opt.MetadataLabels.IsSet(rwWrite) {
@ -566,7 +591,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// Can't set Labels on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
return f.setLabels(ctx, info, labels)
err := f.setLabels(ctx, info, labels)
if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "folder-color-rgb":
updateInfo.FolderColorRgb = v

View File

@ -216,7 +216,10 @@ are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.`,
shared folder.
See also --dropbox-root-namespace for an alternative way to work with shared
folders.`,
Default: false,
Advanced: true,
}, {
@ -237,6 +240,11 @@ shared folder.`,
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}, {
Name: "root_namespace",
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "",
Advanced: true,
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
})
}
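The new root_namespace option is a non-interactive alternative to
shared_folders: it points the remote's root at an arbitrary Dropbox namespace
ID. Assuming rclone's usual backend flag naming, usage would look like:

rclone lsd --dropbox-root-namespace 1234567890 dropbox:

where 1234567890 is a placeholder namespace ID.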
@ -253,6 +261,7 @@ type Options struct {
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"`
}
// Fs represents a remote dropbox server
@ -377,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, fmt.Errorf("NewFS convert token: %w", err)
@ -502,8 +511,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features.Fill(ctx, f)
if f.opt.RootNsid != "" {
f.ns = f.opt.RootNsid
fs.Debugf(f, "Overriding root namespace to %q", f.ns)
} else if strings.HasPrefix(root, "/") {
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
@ -644,7 +656,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// listSharedFoldersApi lists all available shared folders mounted and not mounted
// listSharedFolders lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false

View File

@ -0,0 +1,912 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
files_sdk "github.com/Files-com/files-sdk-go/v3"
"github.com/Files-com/files-sdk-go/v3/bundle"
"github.com/Files-com/files-sdk-go/v3/file"
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
"github.com/Files-com/files-sdk-go/v3/folder"
"github.com/Files-com/files-sdk-go/v3/session"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
)
/*
Run of rclone info
stringNeedsEscaping = []rune{
'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
folderNotEmpty = "processing-failure/folder-not-empty"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "filescom",
Description: "Files.com",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "site",
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
}, {
Name: "username",
Help: "The username used to authenticate with Files.com.",
}, {
Name: "password",
Help: "The password used to authenticate with Files.com.",
IsPassword: true,
}, {
Name: "api_key",
Help: "The API key used to authenticate with Files.com.",
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeInvalidUtf8),
}},
})
}
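Once registered, the backend takes part in normal remote construction. A
minimal sketch of using it from library code, assuming a remote named
"myfiles" has been configured with the options above:

f, err := fs.NewFs(ctx, "myfiles:some/dir")
if err != nil {
log.Fatal(err) // assumes a "log" import in the caller
}
entries, err := f.List(ctx, "")
_, _ = entries, err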
// Options defines the configuration for this backend
type Options struct {
Site string `config:"site"`
Username string `config:"username"`
Password string `config:"password"`
APIKey string `config:"api_key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote files.com server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
fileClient *file.Client // the connection to the file API
folderClient *folder.Client // the connection to the folder API
migrationClient *file_migration.Client // the connection to the file migration API
bundleClient *bundle.Client // the connection to the bundle API
pacer *fs.Pacer // pacer for API calls
}
// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64 // size of the object
crc32 string // CRC32 of the object content
md5 string // MD5 of the object content
mimeType string // Content-Type of the object
modTime time.Time // modification time of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("files root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if apiErr, ok := err.(files_sdk.ResponseError); ok {
for _, e := range retryErrorCodes {
if apiErr.HttpCode == e {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
}
}
return fserrors.ShouldRetry(err), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
params := files_sdk.FileFindParams{
Path: f.absPath(path),
}
var file files_sdk.File
err = f.pacer.Call(func() (bool, error) {
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
return &file, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
config, err := newClientConfig(ctx, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
fileClient: &file.Client{Config: config},
folderClient: &folder.Client{Config: config},
migrationClient: &file_migration.Client{Config: config},
bundleClient: &bundle.Client{Config: config},
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f)
if f.root != "" {
info, err := f.readMetaDataForPath(ctx, "")
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
return f, fs.ErrorIsFile
}
}
return f, err
}
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
if opt.Site != "" {
config.Subdomain = opt.Site
_, err = url.Parse(config.Endpoint())
if err != nil {
config.Subdomain = ""
config.EndpointOverride = opt.Site
_, err = url.Parse(config.Endpoint())
if err != nil {
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
return
}
}
}
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
if opt.APIKey != "" {
config.APIKey = opt.APIKey
return
}
if opt.Username == "" {
err = errors.New("username not found")
return
}
if opt.Password == "" {
err = errors.New("password not found")
return
}
opt.Password, err = obscure.Reveal(opt.Password)
if err != nil {
return
}
sessionClient := session.Client{Config: config}
params := files_sdk.SessionCreateParams{
Username: opt.Username,
Password: opt.Password,
}
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
if err != nil {
err = fmt.Errorf("couldn't create session: %w", err)
return
}
config.SessionId = thisSession.Id
return
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if file != nil {
err = o.setMetaData(file)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
var it *folder.Iter
params := files_sdk.FolderListForParams{
Path: f.absPath(dir),
}
err = f.pacer.Call(func() (bool, error) {
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for it.Next() {
item := ptr(it.File())
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
remote = path.Join(dir, remote)
if remote == dir {
continue
}
item, err = f.readMetaDataForPath(ctx, remote)
if err != nil {
if files_sdk.IsNotExist(err) {
continue
}
return nil, err
}
if item.IsDir() {
d := fs.NewDir(remote, item.ModTime())
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
return nil, err
}
entries = append(entries, o)
}
}
err = it.Err()
if files_sdk.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return
}
// Creates, from the parameters passed in, a half-finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
// Create the directory for the object if it doesn't exist
err = f.mkParentDir(ctx, remote)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
func (f *Fs) mkdir(ctx context.Context, path string) error {
if path == "" || path == "." {
return nil
}
params := files_sdk.FolderCreateParams{
Path: path,
MkdirParents: ptr(true),
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if files_sdk.IsExist(err) {
return nil
}
return err
}
// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.mkdir(ctx, f.absPath(dir))
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
o := Object{
fs: f,
remote: dir,
}
return o.SetModTime(ctx, modTime)
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in it
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
path := f.absPath(dir)
if path == "" || path == "." {
return errors.New("can't purge root directory")
}
params := files_sdk.FileDeleteParams{
Path: path,
Recursive: ptr(!check),
}
err := f.pacer.Call(func() (bool, error) {
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
// Allow for eventual consistency deletion of child objects.
if isFolderNotEmpty(err) {
return true, err
}
return shouldRetry(ctx, err)
})
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorDirNotFound
} else if isFolderNotEmpty(err) {
return fs.ErrorDirectoryNotEmpty
}
return fmt.Errorf("rmdir failed: %w", err)
}
return nil
}
// Rmdir deletes the folder at dir
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
if err != nil {
return
}
srcPath := srcObj.fs.absPath(srcObj.remote)
dstPath := f.absPath(remote)
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
dstObj, err = f.createObject(ctx, remote)
if err != nil {
return
}
// Copy the object
params := files_sdk.FileCopyParams{
Path: srcPath,
Destination: dstPath,
Overwrite: ptr(true),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return
}
err = f.waitForAction(ctx, action, "copy")
if err != nil {
return
}
err = dstObj.SetModTime(ctx, srcObj.modTime)
return
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
// Move the object
params := files_sdk.FileMoveParams{
Path: src.absPath(srcRemote),
Destination: f.absPath(dstRemote),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
err = f.waitForAction(ctx, action, "move")
if err != nil {
return nil, err
}
info, err = f.readMetaDataForPath(ctx, dstRemote)
return
}
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
var migration files_sdk.FileMigration
err = f.pacer.Call(func() (bool, error) {
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
// noop
}, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err == nil && migration.Status != "completed" {
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
}
return
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// Do the move
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
_, err = f.readMetaDataForPath(ctx, dstRemote)
if err == nil {
return fs.ErrorDirExists
}
// Create temporary object
dstObj, err := f.createObject(ctx, dstRemote)
if err != nil {
return
}
// Do the move
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
return
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
params := files_sdk.BundleCreateParams{
Paths: []string{f.absPath(remote)},
}
if expire < fs.DurationOff {
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
}
var bundle files_sdk.Bundle
err = f.pacer.Call(func() (bool, error) {
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
url = bundle.Url
return
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.CRC32, hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash (CRC32 or MD5) of the object as a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
switch t {
case hash.CRC32:
if o.crc32 == "" {
return "", nil
}
return fmt.Sprintf("%08s", o.crc32), nil
case hash.MD5:
return o.md5, nil
}
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
o.modTime = file.ModTime()
if !file.IsDir() {
o.size = file.Size
o.crc32 = file.Crc32
o.md5 = file.Md5
o.mimeType = file.MimeType
}
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
return err
}
if file.IsDir() {
return fs.ErrorIsDir
}
return o.setMetaData(file)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
params := files_sdk.FileUpdateParams{
Path: o.fs.absPath(o.remote),
ProvidedMtime: &modTime,
}
var file files_sdk.File
err = o.fs.pacer.Call(func() (bool, error) {
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.setMetaData(&file)
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset, count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
params := files_sdk.FileDownloadParams{
Path: o.fs.absPath(o.remote),
}
headers := &http.Header{}
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.fileClient.Download(
params,
files_sdk.WithContext(ctx),
files_sdk.RequestHeadersOption(headers),
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
in = closer
return err
}),
)
return shouldRetry(ctx, err)
})
return
}
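// Illustrative note (not part of this commit): HTTP byte ranges are inclusive
// at both ends, hence the offset+count-1 above. For example, an fs.RangeOption
// giving offset=5, count=10 produces the header
//
//	Range: bytes=5-14
//
// which requests exactly 10 bytes.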
// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
return &t
}
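// Illustrative note (not part of this commit): ptr is needed because Go does
// not allow taking the address of a literal or constant directly:
//
//	p := &true       // compile error: cannot take the address of true
//	p := ptr(true)   // ok: *bool pointing at a true value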
func isFolderNotEmpty(err error) bool {
var re files_sdk.ResponseError
ok := errors.As(err, &re)
return ok && re.Type == folderNotEmpty
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
uploadOpts := []file.UploadOption{
file.UploadWithContext(ctx),
file.UploadWithReader(in),
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
file.UploadWithProvidedMtime(src.ModTime(ctx)),
}
err := o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Upload(uploadOpts...)
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.readMetaData(ctx)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := files_sdk.FileDeleteParams{
Path: o.fs.absPath(o.remote),
}
return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

@ -0,0 +1,17 @@
// Test Files filesystem interface
package filescom_test
import (
"testing"
"github.com/rclone/rclone/backend/filescom"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFilesCom:",
NilObject: (*filescom.Object)(nil),
})
}

@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
`, "|", "`"),
Default: 0,
Advanced: true,
}, {

backend/gofile/api/types.go
@ -0,0 +1,311 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
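// Illustrative sketch (not part of this commit): because timeFormat embeds
// the surrounding quotes, Marshal/Unmarshal round-trip cleanly, e.g.
//
//	t := Time(time.Date(2017, 5, 3, 7, 26, 10, 0, time.UTC))
//	b, _ := t.MarshalJSON() // []byte(`"2017-05-03T07:26:10Z"`)
//	var t2 Time
//	_ = t2.UnmarshalJSON(b) // t2 equals t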
// Error is returned from gofile when things go wrong
type Error struct {
Status string `json:"status"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("Error %q", e.Status)
return out
}
// IsError returns true if there is an error
func (e Error) IsError() bool {
return e.Status != "ok"
}
// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
if err != nil {
return err
}
if e.IsError() {
return e
}
return nil
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
)
// Item describes a folder or a file as returned by /contents
type Item struct {
ID string `json:"id"`
ParentFolder string `json:"parentFolder"`
Type string `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Code string `json:"code"`
CreateTime int64 `json:"createTime"`
ModTime int64 `json:"modTime"`
Link string `json:"link"`
MD5 string `json:"md5"`
MimeType string `json:"mimetype"`
ChildrenCount int `json:"childrenCount"`
DirectLinks map[string]*DirectLink `json:"directLinks"`
//Public bool `json:"public"`
//ServerSelected string `json:"serverSelected"`
//Thumbnail string `json:"thumbnail"`
//DownloadCount int `json:"downloadCount"`
//TotalDownloadCount int64 `json:"totalDownloadCount"`
//TotalSize int64 `json:"totalSize"`
//ChildrenIDs []string `json:"childrenIds"`
Children map[string]*Item `json:"children"`
}
// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
return t.Unix()
}
// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
return time.Unix(t, 0)
}
// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
DirectLink string `json:"directLink"`
}
// Contents is returned from the /contents call
type Contents struct {
Error
Data struct {
Item
} `json:"data"`
Metadata Metadata `json:"metadata"`
}
// Metadata is returned when paging is in use
type Metadata struct {
TotalCount int `json:"totalCount"`
TotalPages int `json:"totalPages"`
Page int `json:"page"`
PageSize int `json:"pageSize"`
HasNextPage bool `json:"hasNextPage"`
}
// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
Error
Data struct {
ID string `json:"id"`
} `json:"data"`
}
// Stats of storage and traffic
type Stats struct {
FolderCount int64 `json:"folderCount"`
FileCount int64 `json:"fileCount"`
Storage int64 `json:"storage"`
TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
TrafficReqDownloaded int64 `json:"trafficReqDownloaded"`
TrafficWebDownloaded int64 `json:"trafficWebDownloaded"`
}
// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
Error
Data struct {
ID string `json:"id"`
Email string `json:"email"`
Tier string `json:"tier"`
PremiumType string `json:"premiumType"`
Token string `json:"token"`
RootFolder string `json:"rootFolder"`
SubscriptionProvider string `json:"subscriptionProvider"`
SubscriptionEndDate int `json:"subscriptionEndDate"`
SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"`
SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"`
StatsCurrent Stats `json:"statsCurrent"`
// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
} `json:"data"`
}
// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
ParentFolderID string `json:"parentFolderId"`
FolderName string `json:"folderName"`
ModTime int64 `json:"modTime,omitempty"`
}
// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
Error
Data Item `json:"data"`
}
// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// DeleteResponse is the input to DELETE /contents
type DeleteResponse struct {
Error
Data map[string]Error
}
// Server is an upload server
type Server struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
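// Illustrative sketch (not part of this commit): for a hypothetical server
// named "store1" in zone "eu" the helpers above give:
//
//	s := Server{Name: "store1", Zone: "eu"}
//	s.String() // "store1 (eu)"
//	s.Root()   // "https://store1.gofile.io/"
//	s.URL()    // "https://store1.gofile.io/contents/uploadfile"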
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
}
// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
Error
Data Item `json:"data"`
}
// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
ExpireTime int64 `json:"expireTime,omitempty"`
SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
DomainsAllowed []any `json:"domainsAllowed,omitempty"`
Auth []any `json:"auth,omitempty"`
}
// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
Error
Data struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
ID string `json:"id"`
DirectLink string `json:"directLink"`
} `json:"data"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The value of the attribute to define:
// For attribute "name": The name of the content (file or folder)
// For attribute "description": The description displayed on the download page (folder only)
// For attribute "tags": A comma-separated list of tags (folder only)
// For attribute "public": either true or false (folder only)
// For attribute "expiry": A unix timestamp of the expiration date (folder only)
// For attribute "password": The password to set (folder only)
type UpdateItemRequest struct {
Attribute string `json:"attribute"`
Value any `json:"attributeValue"`
}
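// Illustrative sketch (not part of this commit): renaming an item would send
// a body like the following to PUT /contents/{id}/update:
//
//	req := UpdateItemRequest{Attribute: "name", Value: "new-name.txt"}
//	// marshals to: {"attribute":"name","attributeValue":"new-name.txt"}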
// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
Error
Data Item `json:"data"`
}
// MoveRequest is the input to /contents/move
type MoveRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// CopyRequest is the input to /contents/copy
type CopyRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
Error
Data struct {
Server string `json:"server"`
Test string `json:"test"`
} `json:"data"`
}

backend/gofile/gofile.go (diff suppressed because it is too large)
@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test
import (
"testing"
"github.com/rclone/rclone/backend/gofile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestGoFile:",
NilObject: (*gofile.Object)(nil),
})
}

@ -697,7 +697,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
if remote == f.opt.Enc.ToStandardPath(directory) {
continue
}
// process directory markers as directories

@ -620,9 +620,7 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {

@ -38,7 +38,7 @@ type dirPattern struct {
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatters is a slice of all the directory patterns
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.

@ -173,6 +173,11 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
@ -341,6 +346,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return errors.New("MergeDirs not supported")
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
@ -418,7 +439,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
if f.db != nil {
if f.db != nil && !f.db.IsStopped() {
err = f.db.Stop(false)
}
if do := f.Fs.Features().Shutdown; do != nil {
@ -514,6 +535,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@ -530,6 +562,8 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)

@ -71,7 +71,14 @@ func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string,
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if hashVal != "" {
return hashVal, err
}
if err != nil {
fs.Debugf(o, "error passing %s: %v", hashType, err)
}
fs.Debugf(o, "passed %s is blank -- trying other methods", hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@ -150,7 +149,7 @@ func (f *Fs) Root() string {
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
return fmt.Sprintf("hdfs://%s/%s", f.opt.Namenode, f.root)
}
// Features returns the optional features of this Fs
@ -210,7 +209,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
modTime: x.ModTime(),
})
}
}
return entries, nil

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs

@ -1,7 +1,6 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9
// +build plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs

@ -89,6 +89,10 @@ that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}, {
Name: "no_escape",
Help: "Do not escape URL metacharacters in path names.",
Default: false,
}},
}
fs.Register(fsi)
@ -100,6 +104,7 @@ type Options struct {
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
NoEscape bool `config:"no_escape"`
}
// Fs stores the interface to the remote HTTP files
@ -326,6 +331,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Join's the remote onto the base URL
func (f *Fs) url(remote string) string {
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + remote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(remote)
}
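// Illustrative note (not part of this commit): assuming an endpoint of
// "https://example.com/" and a remote of "a b/c", url() returns
// "https://example.com/a%20b/c" by default, but the raw
// "https://example.com/a b/c" when no_escape is set - useful for servers
// that expect unescaped metacharacters.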

@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
var path = strings.Replace(resultURL, endpoint, "", 1)
path = path + expires
path += expires
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
mac.Write([]byte(path))
signature := hex.EncodeToString(mac.Sum(nil))

@ -1487,16 +1487,38 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(ctx, remote)
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if err != nil {
return nil, err
}
if err := f.mkParentDir(ctx, remote); err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
if err == nil {
var createTime time.Time
var createTimeMeta bool
var modTime time.Time
var modTimeMeta bool
if meta != nil {
createTime, createTimeMeta = srcObj.parseFsMetadataTime(meta, "btime")
if !createTimeMeta {
createTime = srcObj.createTime
}
modTime, modTimeMeta = srcObj.parseFsMetadataTime(meta, "mtime")
if !modTimeMeta {
modTime = srcObj.modTime
}
}
if bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
// Workaround necessary when destination was a trashed file, to avoid the copied file also being in trash (bug in api?)
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
} else if createTimeMeta || modTimeMeta {
info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
}
}
if err != nil {
@ -1523,12 +1545,30 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(ctx, remote)
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if err != nil {
return nil, err
}
if err := f.mkParentDir(ctx, remote); err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
if err != nil && meta != nil {
createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
if !createTimeMeta {
createTime = srcObj.createTime
}
modTime, modTimeMeta := srcObj.parseFsMetadataTime(meta, "mtime")
if !modTimeMeta {
modTime = srcObj.modTime
}
if createTimeMeta || modTimeMeta {
info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
}
}
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}
@ -1786,6 +1826,20 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
return o.setMetaData(info)
}
// parseFsMetadataTime parses a time string from fs.Metadata with key
func (o *Object) parseFsMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
value, ok := m[key]
if ok {
var err error
t, err = time.Parse(time.RFC3339Nano, value) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
}
return t, ok
}
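// Illustrative sketch (not part of this commit): the metadata values are
// RFC3339Nano strings, so for example
//
//	m := fs.Metadata{"mtime": "2020-01-02T03:04:05.123456789Z"}
//	t, ok := o.parseFsMetadataTime(m, "mtime") // ok == true
//	_, ok = o.parseFsMetadataTime(m, "btime")  // ok == false (key missing)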
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
@ -1957,23 +2011,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var createdTime string
var modTime string
if meta != nil {
if v, ok := meta["btime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
} else {
if t, ok := o.parseFsMetadataTime(meta, "btime"); ok {
createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
}
}
if v, ok := meta["mtime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
} else {
if t, ok := o.parseFsMetadataTime(meta, "mtime"); ok {
modTime = api.Rfc3339Time(t).String()
}
}
}
if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}

@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
//"utime" - read-only
//"content-type" - read-only
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()

@ -67,13 +67,13 @@ func init() {
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Help: "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Help: "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.",
Provider: "digistorage",
IsPassword: true,
Required: true,

@ -36,7 +36,7 @@ import (
)
const (
maxEntitiesPerPage = 1024
maxEntitiesPerPage = 1000
minSleep = 200 * time.Millisecond
maxSleep = 2 * time.Second
pacerBurst = 1
@ -219,7 +219,8 @@ type listAllFn func(*entity) bool
// Search is a bit fussy about which characters match
//
// If the name doesn't match this then do a dir list instead
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ .]+$`)
// N.B.: Linkbox doesn't support search by name that is longer than 50 chars
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ -.]{1,50}$`)
// Lists the directory required calling the user function on each item found
//
@ -238,6 +239,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllF
// If name isn't good then do an unbounded search
name = ""
}
OUTER:
for numberOfEntities == maxEntitiesPerPage {
pageNumber++
@ -258,7 +260,6 @@ OUTER:
err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
if err != nil {
return false, fmt.Errorf("getting files failed: %w", err)
}
numberOfEntities = len(responseResult.SearchData.Entities)

@ -13,5 +13,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestLinkbox:",
NilObject: (*linkbox.Object)(nil),
// Linkbox doesn't support leading dots for files
SkipLeadingDot: true,
})
}

@ -1,5 +1,4 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux
package local
@ -24,9 +23,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(bs * int64(s.Blocks)), //nolint: unconvert // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), //nolint: unconvert // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), //nolint: unconvert // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package local

@ -0,0 +1,82 @@
//go:build darwin && cgo
// Package local provides a filesystem interface
package local
import (
"context"
"fmt"
"runtime"
"github.com/go-darwin/apfs"
"github.com/rclone/rclone/fs"
)
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
return nil, fs.ErrorCantCopy
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't clone - not same remote type")
return nil, fs.ErrorCantCopy
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if err != nil {
return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
}
// Create destination
dstObj := f.newObject(remote)
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
err = Clone(srcObj.path, f.localPath(remote))
if err != nil {
return nil, err
}
fs.Debugf(remote, "server-side cloned!")
// Set metadata if --metadata is in use
if meta != nil {
err = dstObj.writeMetadata(meta)
if err != nil {
return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
}
}
return f.NewObject(ctx, remote)
}
// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
state := apfs.CopyFileStateAlloc()
defer func() {
if err := apfs.CopyFileStateFree(state); err != nil {
fs.Errorf(dst, "free state error: %v", err)
}
}()
cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
return err
}
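// Illustrative note (not part of this commit): apfs.CopyFile with
// COPYFILE_CLONE behaves like `cp -c` on macOS - the destination initially
// shares data blocks with the source (copy-on-write), so
//
//	err := Clone("/path/to/src.bin", "/path/to/dst.bin")
//
// completes in roughly constant time regardless of file size, assuming both
// paths live on the same APFS volume.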
// Check the interfaces are satisfied
var (
_ fs.Copier = &Fs{}
)

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package local

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package local

@ -1,5 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local

@ -1,5 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

@ -32,9 +32,32 @@ import (
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
const (
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
const (
mTime timeType = iota
aTime
bTime
cTime
)
type timeTypeChoices struct{}
func (timeTypeChoices) Choices() []string {
return []string{
mTime: "mtime",
aTime: "atime",
bTime: "btime",
cTime: "ctime",
}
}
// Register with Fs
func init() {
@ -53,9 +76,12 @@ netbsd, macOS and Solaris. It is **not** supported on Windows yet
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
},
Options: []fs.Option{{
Options: []fs.Option{
{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
@ -64,21 +90,24 @@ supported by all file systems) under the "user.*" prefix.
Value: "true",
Help: "Disables long file names.",
}},
}, {
},
{
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
},
{
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
}, {
},
{
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
@ -87,7 +116,8 @@ points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
},
{
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
@ -101,7 +131,8 @@ So rclone now always reads the link.
`,
Default: false,
Advanced: true,
}, {
},
{
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames.
@ -119,7 +150,8 @@ Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
Default: false,
Advanced: true,
}, {
},
{
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload.
@ -154,14 +186,16 @@ the direct stat value and setting this flag will disable that.
`,
Default: false,
Advanced: true,
}, {
},
{
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}, {
},
{
Name: "case_sensitive",
Help: `Force the filesystem to report itself as case sensitive.
@ -170,7 +204,8 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
},
{
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive.
@ -179,7 +214,29 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
},
{
Name: "no_clone",
Help: `Disable reflink cloning for server-side copies.
Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.
Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.
Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.
Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
Default: false,
Advanced: true,
},
{
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files.
@ -190,7 +247,8 @@ preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
}, {
},
{
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads.
@ -200,7 +258,8 @@ the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
},
{
Name: "no_set_modtime",
Help: `Disable setting modtime.
@ -211,12 +270,51 @@ when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
},
{
Name: "time_type",
Help: `Set what kind of time is returned.
Normally rclone does all operations on the mtime or Modification time.
If you set this flag then rclone will return the Modified time as whatever
you set here. So if you use "rclone lsl --local-time-type ctime" then
you will see ctimes in the listing.
If the OS doesn't support returning the time_type specified then rclone
will silently replace it with the modification time which all OSes support.
- mtime is supported by all OSes
- atime is supported on all OSes except: plan9, js
- btime is only supported on: Windows, macOS, freebsd, netbsd
- ctime is supported on all OSes except: Windows, plan9, js
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
Default: mTime,
Advanced: true,
Examples: []fs.OptionExample{{
Value: mTime.String(),
Help: "The last modification time.",
}, {
Value: aTime.String(),
Help: "The last access time.",
}, {
Value: bTime.String(),
Help: "The creation time.",
}, {
Value: cTime.String(),
Help: "The last status change time.",
}},
},
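// Illustrative usage (not part of this commit): with the option above,
// `rclone lsl --local-time-type btime /path` lists creation times where
// the OS supports btime, silently falling back to mtime elsewhere.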
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.OS,
}},
},
},
}
fs.Register(fsi)
}
@ -235,7 +333,9 @@ type Options struct {
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
TimeType timeType `config:"time_type"`
Enc encoder.MultiEncoder `config:"encoding"`
NoClone bool `config:"no_clone"`
}
// Fs represents a local filesystem rooted at root
@ -270,6 +370,11 @@ type Object struct {
translatedLink bool // Is this object a translated link
}
// Directory represents a local filesystem directory
type Directory struct {
Object
}
// ------------------------------------------------------------
var (
@ -307,6 +412,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
SlowHash: true,
ReadMetadata: true,
WriteMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
DirModTimeUpdatesOnWrite: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
@ -314,6 +424,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.FollowSymlinks {
f.lstat = os.Stat
}
if opt.NoClone {
// Disable server-side copy when --local-no-clone is set
f.features.Copy = nil
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
@ -453,6 +567,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Create new directory object from the info passed in
func (f *Fs) newDirectory(dir string, fi os.FileInfo) *Directory {
o := f.newObject(dir)
o.setMetadata(fi)
return &Directory{
Object: *o,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@ -563,7 +686,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(newRemote, fi.ModTime())
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
@ -643,6 +766,58 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return nil
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
o := Object{
fs: f,
remote: dir,
path: f.localPath(dir),
}
return o.SetModTime(ctx, modTime)
}
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
// Find and or create the directory
localPath := f.localPath(dir)
fi, err := f.lstat(localPath)
if errors.Is(err, os.ErrNotExist) {
err := f.Mkdir(ctx, dir)
if err != nil {
return nil, fmt.Errorf("mkdir metadata: failed make directory: %w", err)
}
fi, err = f.lstat(localPath)
if err != nil {
return nil, fmt.Errorf("mkdir metadata: failed to read info: %w", err)
}
} else if err != nil {
return nil, err
}
// Create directory object
d := f.newDirectory(dir, fi)
// Set metadata on the directory object if provided
if metadata != nil {
err = d.writeMetadata(metadata)
if err != nil {
return nil, fmt.Errorf("failed to set metadata on directory: %w", err)
}
// Re-read info now we have finished setting stuff
err = d.lstat()
if err != nil {
return nil, fmt.Errorf("mkdir metadata: failed to re-read info: %w", err)
}
}
return d, nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
@ -720,27 +895,6 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(dir)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
@ -780,6 +934,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if err != nil {
return nil, fmt.Errorf("move: failed to read metadata: %w", err)
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
@ -795,6 +955,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Set metadata if --metadata is in use
err = dstObj.writeMetadata(meta)
if err != nil {
return nil, fmt.Errorf("move: failed to set metadata: %w", err)
}
// Update the info
err = dstObj.lstat()
if err != nil {
@ -1068,7 +1234,7 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
if !oldtime.Equal(readTime(file.o.fs.opt.TimeType, fi)) {
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
@ -1364,7 +1530,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
o.fs.objectMetaMu.Lock()
o.size = info.Size()
o.modTime = info.ModTime()
o.modTime = readTime(o.fs.opt.TimeType, info)
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// Read the size of the link.
@ -1433,45 +1599,109 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
return err
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
err := o.writeMetadata(metadata)
if err != nil {
return fmt.Errorf("SetMetadata failed on Object: %w", err)
}
// Re-read info now we have finished setting stuff
return o.lstat()
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
var vol string
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
vol = filepath.VolumeName(s)
if vol == `\\?` && len(s) >= 6 {
// `\\?\C:`
vol = s[:6]
}
s = vol + enc.FromStandardPath(s[len(vol):])
s = s[len(vol):]
}
// Don't use FromStandardPath here, to make sure dot names (`.`, `..`) are not re-encoded.
// This takes care of cases like Standard: .// (where the first dot means the current directory).
if enc != encoder.Standard {
s = filepath.ToSlash(s)
parts := strings.Split(s, "/")
encoded := make([]string, len(parts))
changed := false
for i, p := range parts {
if (p == ".") || (p == "..") {
encoded[i] = p
continue
}
part := enc.FromStandardName(p)
changed = changed || part != p
encoded[i] = part
}
if changed {
s = strings.Join(encoded, "/")
}
s = filepath.FromSlash(s)
}
if runtime.GOOS == "windows" {
s = vol + s
}
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
if !noUNC {
// Convert to UNC
// Convert to UNC. It does nothing on non windows platforms.
s = file.UNCPath(s)
}
return s
}
s = enc.FromStandardPath(s)
return s
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
func (d *Directory) Items() int64 {
return -1
}
// ID returns the internal ID of this directory if known, or
// "" otherwise
func (d *Directory) ID() string {
return ""
}
// SetMetadata sets metadata for a Directory
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
err := d.writeMetadata(metadata)
if err != nil {
return fmt.Errorf("SetMetadata failed on Directory: %w", err)
}
// Re-read info now we have finished setting stuff
return d.lstat()
}
// Hash does nothing on a directory
//
// This method is implemented with the incorrect type signature to
// stop the Directory type asserting to fs.Object or fs.ObjectInfo
func (d *Directory) Hash() {
// Does nothing
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.DirSetModTimer = &Fs{}
_ fs.MkdirMetadataer = &Fs{}
_ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
_ fs.SetMetadataer = &Object{}
_ fs.Directory = &Directory{}
_ fs.SetModTimer = &Directory{}
_ fs.SetMetadataer = &Directory{}
)

@ -76,6 +76,24 @@ func TestUpdatingCheck(t *testing.T) {
}
// Test corrupted on transfer
// should error due to size/hash mismatch
func TestVerifyCopy(t *testing.T) {
t.Skip("FIXME this test is unreliable")
r := fstest.NewRun(t)
filePath := "sub dir/local test"
r.WriteFile(filePath, "some content", time.Now())
src, err := r.Flocal.NewObject(context.Background(), filePath)
require.NoError(t, err)
src.(*Object).fs.opt.NoCheckUpdated = true
for i := 0; i < 100; i++ {
go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
}
_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)
assert.Error(t, err)
}
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)

@ -2,6 +2,7 @@ package local
import (
"fmt"
"math"
"os"
"runtime"
"strconv"
@ -72,12 +73,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
value, ok := m[key]
if ok {
var err error
result64, err := strconv.ParseInt(value, base, 64)
parsed, err := strconv.ParseInt(value, base, 0)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
result = int(result64)
result = int(parsed)
}
return result, ok
}
@ -128,11 +129,16 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
if hasMode {
err = os.Chmod(o.path, os.FileMode(mode))
if mode >= 0 {
umode := uint(mode)
if umode <= math.MaxUint32 {
err = os.Chmod(o.path, os.FileMode(umode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
}
}
// FIXME not parsing rdev yet
return outErr
}

@ -1,16 +1,34 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atimespec.Unix())
case bTime:
return time.Unix(stat.Birthtimespec.Unix())
case cTime:
return time.Unix(stat.Ctimespec.Unix())
}
return fi.ModTime()
}
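// Illustrative note (not part of this commit): syscall.Timespec.Unix()
// returns (sec, nsec), which plugs straight into time.Unix, e.g.
//
//	ts := syscall.Timespec{Sec: 1700000000, Nsec: 500}
//	t := time.Unix(ts.Unix()) // 2023-11-14 22:13:20.0000005 +0000 UTC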
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)

@ -1,12 +1,13 @@
//go:build linux
// +build linux
package local
import (
"fmt"
"os"
"runtime"
"sync"
"syscall"
"time"
"github.com/rclone/rclone/fs"
@ -18,6 +19,22 @@ var (
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
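
The statxCheckOnce.Do call is a probe-once dispatch: the first caller finds out whether the statx syscall is usable and records a function pointer, and every later call goes straight through it with no further probing. A minimal sketch of the pattern with hypothetical probe and fallback names:

package main

import (
	"fmt"
	"sync"
)

var (
	probeOnce sync.Once
	readMeta  func(path string) string // implementation chosen once
)

// statxWorks stands in for the real runtime probe of statx.
func statxWorks() bool { return false }

func readMetadata(path string) string {
	probeOnce.Do(func() {
		if statxWorks() {
			readMeta = func(p string) string { return "statx: " + p }
		} else {
			readMeta = func(p string) string { return "lstat: " + p }
		}
	})
	return readMeta(path)
}

func main() {
	fmt.Println(readMetadata("/tmp/x"))
}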

View File

@ -1,14 +1,20 @@
//go:build plan9 || js
// +build plan9 js
//go:build dragonfly || plan9 || js
package local
import (
"fmt"
"os"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)

View File

@ -1,16 +1,32 @@
//go:build openbsd || solaris
// +build openbsd solaris
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)

View File

@ -1,16 +1,32 @@
//go:build windows
// +build windows
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Win32FileAttributeData)
if !ok {
fs.Debugf(nil, "didn't return Win32FileAttributeData as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(0, stat.LastAccessTime.Nanoseconds())
case bTime:
return time.Unix(0, stat.CreationTime.Nanoseconds())
}
return fi.ModTime()
}
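
Win32 file times count 100 ns ticks since 1601; Filetime.Nanoseconds() rebases them to nanoseconds since the Unix epoch, which is why time.Unix(0, ...) is the right constructor. A small Windows-only round trip:

//go:build windows

package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	now := time.Now()
	ft := syscall.NsecToFiletime(now.UnixNano()) // to FILETIME ticks
	back := time.Unix(0, ft.Nanoseconds())       // and back again
	fmt.Println(now, back) // equal to within FILETIME's 100 ns granularity
}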
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)

View File

@ -1,7 +1,6 @@
// Device reading functions
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local

View File

@ -1,7 +1,6 @@
// Device reading functions
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package local

View File

@ -1,18 +1,13 @@
//go:build windows
// +build windows
package local
import (
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
const (
ERROR_SHARING_VIOLATION syscall.Errno = 32
"golang.org/x/sys/windows"
)
// Removes name, retrying on a sharing violation
@ -28,7 +23,7 @@ func remove(name string) (err error) {
if !ok {
break
}
if pathErr.Err != ERROR_SHARING_VIOLATION {
if pathErr.Err != windows.ERROR_SHARING_VIOLATION {
break
}
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
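
A condensed sketch of the full retry loop this hunk belongs to (maxTries and sleepTime here are placeholder values, not the ones defined elsewhere in the file): only an *os.PathError wrapping ERROR_SHARING_VIOLATION is retried; any other failure breaks out immediately.

//go:build windows

package main

import (
	"os"
	"time"

	"golang.org/x/sys/windows"
)

// remove retries deletion while another process still holds the file.
func remove(name string) (err error) {
	const maxTries = 10
	sleepTime := 100 * time.Millisecond
	for i := 0; i < maxTries; i++ {
		err = os.Remove(name)
		if err == nil {
			break
		}
		pathErr, ok := err.(*os.PathError)
		if !ok {
			break
		}
		if pathErr.Err != windows.ERROR_SHARING_VIOLATION {
			break
		}
		time.Sleep(sleepTime)
		sleepTime *= 2 // simple exponential backoff
	}
	return err
}

func main() { _ = remove(`C:\temp\example.txt`) }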

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package local

View File

@ -1,10 +1,8 @@
//go:build windows
// +build windows
package local
import (
"os"
"syscall"
"time"
)
@ -13,7 +11,13 @@ const haveSetBTime = true
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
pathp, err := syscall.UTF16PtrFromString(name)
if err != nil {
return err
}
h, err := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return err
}
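
The hunk ends just after the handle is obtained; a sketch of how such a handle is then typically used (the continuation below is illustrative, not necessarily the exact committed code): SetFileTime's first time pointer is the creation ("birth") time, and nil leaves the other two untouched.

//go:build windows

package main

import (
	"syscall"
	"time"
)

func setBTime(name string, btime time.Time) error {
	pathp, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	// FILE_WRITE_ATTRIBUTES is enough to set times; BACKUP_SEMANTICS
	// lets the call work on directories as well as files.
	h, err := syscall.CreateFile(pathp,
		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if err != nil {
		return err
	}
	defer syscall.Close(h)
	ft := syscall.NsecToFiletime(btime.UnixNano())
	return syscall.SetFileTime(h, &ft, nil, nil)
}

func main() { _ = setBTime(`C:\temp\example.txt`, time.Now()) }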

View File

@ -1,5 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

View File

@ -1,5 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local

View File

@ -1,5 +1,4 @@
//go:build !openbsd && !plan9
// +build !openbsd,!plan9
package local

View File

@ -1,7 +1,7 @@
//go:build openbsd || plan9
// +build openbsd plan9
// The pkg/xattr module doesn't compile for openbsd or plan9
//go:build openbsd || plan9
package local
import "github.com/rclone/rclone/fs"

View File

@ -46,8 +46,8 @@ import (
// Global constants
const (
minSleepPacer = 10 * time.Millisecond
maxSleepPacer = 2 * time.Second
minSleepPacer = 100 * time.Millisecond
maxSleepPacer = 5 * time.Second
decayConstPacer = 2 // bigger for slower decay, exponential
metaExpirySec = 20 * 60 // meta server expiration time
serverExpirySec = 3 * 60 // download server expiration time
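
For context, constants like these are typically fed into the backend's pacer; a hedged sketch of the usual wiring (assuming rclone's fs and lib/pacer packages behave as they do in other backends):

package main

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

const (
	minSleepPacer   = 100 * time.Millisecond
	maxSleepPacer   = 5 * time.Second
	decayConstPacer = 2
)

func main() {
	// Retries back off from minSleep toward maxSleep; the decay
	// constant controls how quickly the sleep shrinks again after
	// successful calls (bigger = slower decay).
	p := fs.NewPacer(context.Background(), pacer.NewDefault(
		pacer.MinSleep(minSleepPacer),
		pacer.MaxSleep(maxSleepPacer),
		pacer.DecayConstant(decayConstPacer),
	))
	_ = p
}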

Some files were not shown because too many files have changed in this diff.