Merge branch 'rclone:master' into master

Commit 22aac8df02 by Fornax, 2024-05-01 16:20:40 +02:00, committed by GitHub
476 changed files with 65951 additions and 1271 deletions

.gitattributes vendored (4 changes)

@ -1,3 +1,7 @@
# Go writes go.mod and go.sum with lf even on windows
go.mod text eol=lf
go.sum text eol=lf
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true


@ -49,7 +49,7 @@ jobs:
quicktest: true
- job_name: mac_amd64
os: macos-11
os: macos-latest
go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
@ -58,7 +58,7 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-11
os: macos-latest
go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
@ -124,7 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@ -137,7 +137,8 @@ jobs:
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
brew install git-annex
if: matrix.os == 'macos-latest'
- name: Install Libraries on Windows
shell: powershell
@ -167,14 +168,6 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
@ -233,18 +226,50 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Code quality test
uses: golangci/golangci-lint-action@v4
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: '>=1.22.0-rc.1'
check-latest: true
cache: false
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v4
with:
version: latest
skip-cache: false # Caching enabled (which is default) on this first lint step only, it handles complete cache of build, go modules and golangci-lint analysis which was necessary to get all lint steps to properly take advantage of it
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v4
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v4
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v4
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v4
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
@ -270,14 +295,6 @@ jobs:
with:
go-version: '>=1.22.0-rc.1'
- name: Go module cache
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
shell: bash
run: |


@ -1,14 +1,14 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

.gitignore vendored (1 change)

@ -7,6 +7,7 @@ docs/public
rclone.iml
.idea
.history
.vscode
*.test
*.iml
fuzz-build.zip


@ -36,7 +36,7 @@ ifdef BETA_SUBDIR
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
@ -167,7 +167,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
rclone -v sync docs/public www.rclone.org:
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
@ -194,8 +194,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
rclone -P copy build/ downloads.rclone.org:/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
upload_github:
./bin/upload-github $(TAG)
@ -205,7 +205,7 @@ cross: doc
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
rclone -v copy build/ pub.rclone.org:/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
@ -218,18 +218,18 @@ ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)


@ -1,7 +1,23 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[<img src="https://rclone.org/img/logos/warp-github-light.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-light-mode-only)
[<img src="https://rclone.org/img/logos/warp-github-dark.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@ -95,6 +111,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)


@ -54,6 +54,7 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"


@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob


@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob


@ -1,7 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob


@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles


@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles


@ -60,6 +60,7 @@ const (
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
defaultMaxAge = 24 * time.Hour
)
// Globals
@ -362,7 +363,7 @@ var retryErrorCodes = []int{
504, // Gateway Time-out
}
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
@ -1248,7 +1249,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
bucket, directory := f.split(dir)
if bucket == "" {
return errors.New("can't purge from root")
@ -1266,7 +1267,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
return time.Since(time.Time(timestamp)).Hours() > 24
return time.Since(time.Time(timestamp)) > maxAge
}
// Delete Config.Transfers in parallel
@ -1289,6 +1290,21 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}()
}
if oldOnly {
if deleteHidden && deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
} else if deleteHidden {
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
} else if deleteUnfinished {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
} else {
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
return nil
}
} else {
fs.Infof(f, "cleaning bucket %q of all files", bucket)
}
last := ""
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
@ -1299,14 +1315,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
@ -1328,12 +1344,17 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
return f.purge(ctx, dir, false, false, false, defaultMaxAge)
}
// CleanUp deletes all the hidden files.
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
return f.purge(ctx, "", true, true, true, defaultMaxAge)
}
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
}
// copy does a server-side copy from dstObj <- srcObj
@ -1763,14 +1784,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
}
return nil
@ -2243,8 +2264,56 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
return bucket.LifecycleRules, nil
}
var cleanupHelp = fs.CommandHelp{
Name: "cleanup",
Short: "Remove unfinished large file uploads.",
Long: `This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
}
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, false, true, maxAge)
}
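// Editor's note (illustrative, not part of the commit): fs.ParseDuration accepts
// rclone's extended duration suffixes, so for the max-age option above "2h"
// parses to 2*time.Hour and "7w" to 7*24*7*time.Hour, i.e. 49 days.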
var cleanupHiddenHelp = fs.CommandHelp{
Name: "cleanup-hidden",
Short: "Remove old versions of files.",
Long: `This command removes any old hidden versions of files.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
cleanupHelp,
cleanupHiddenHelp,
}
// Command the backend to run a named command
@ -2260,6 +2329,10 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
case "cleanup":
return f.cleanupCommand(ctx, name, arg, opt)
case "cleanup-hidden":
return f.cleanupHiddenCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}


@ -1,11 +1,25 @@
package b2
import (
"context"
"crypto/sha1"
"fmt"
"path"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
@ -170,9 +184,234 @@ func TestParseTimeString(t *testing.T) {
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
original := random.String(1000)
contents := fstest.Gz(t, original)
mimeType := "text/html"
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// We currently have a limited amount of metadata to test with B2
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
// b2 NewObject doesn't work with VersionAt
//t.Run("NewObject", func(t *testing.T) {
// gotObj, gotErr := f.NewObject(ctx, fileName)
// assert.Equal(t, test.wantErr, gotErr)
// if gotErr == nil {
// assert.Equal(t, test.wantSize, gotObj.Size())
// }
//})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})
// Purge gets tested later
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
// Internal tests go here
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
}
var _ fstests.InternalTester = (*Fs)(nil)


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test


@ -1,7 +1,6 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test


@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package cache implements a virtual provider to cache existing remotes.
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@ -119,7 +118,7 @@ func (r *Handle) startReadWorkers() {
r.scaleWorkers(totalWorkers)
}
// scaleOutWorkers will increase the worker pool count by the provided amount
// scaleWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
if current == desired {


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@ -1,3 +1,6 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import bolt "go.etcd.io/bbolt"


@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
)
// Chunker's composite files have one or more chunks
@ -101,8 +102,10 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
const (
maxMetadataSize = 1023
maxMetadataSizeWritten = 255
)
// Current/highest supported metadata format.
const metadataVersion = 2
@ -317,11 +320,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// i.e. `rpath` does not exist in the wrapped remote, but chunker
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
if err == nil && !f.useMeta {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := cache.Get(ctx, baseName+firstChunkPath)
newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
f.base = newBase
err = testErr
cache.PinUntilFinalized(f.base, f)
}
}
@ -959,6 +964,11 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
if sameMain && f.base.Features().IsLocal {
// on local, make sure the EqualFold still holds true when accounting for encoding.
// sometimes paths with special characters will only normalize the same way in Standard Encoding.
sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
}
} else {
sameMain = mainRemote == remote
}
@ -972,7 +982,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
continue
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
return nil, err
}
@ -1134,8 +1144,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
basePut putFn, action string, target fs.Object,
) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
@ -1956,7 +1966,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
metaXactID := ""


@ -455,7 +455,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
}
return nil
}


@ -520,7 +520,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}


@ -1919,7 +1919,7 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
return "", "", isDocument
}
// findExportFormatByMimeType works out the optimum export settings
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be


@ -9,6 +9,8 @@ import (
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
@ -37,7 +39,7 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
Type: "boolean",
Example: "false",
},
@ -135,23 +137,30 @@ func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, use
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
errs := errcount.New()
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, perm).
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
SendNotificationEmail(false).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set permission: %w", err)
fs.Errorf(f, "Failed to set permission: %v", err)
errs.Add(err)
}
}
return nil
err = errs.Err("failed to set permission")
if err != nil {
err = fserrors.NoRetryError(err)
}
return err
}
// Clean attributes from permissions which we can't write
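The setPermissions loop above switches from fail-fast to collect-and-continue: each failure is logged and counted, and one aggregated, non-retryable error is returned at the end. A minimal standalone sketch of the same shape, using the standard library's errors.Join in place of rclone's errcount helper (setAll, set, and the item values are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// setAll applies set to every item, logging failures but carrying on,
// then returns one aggregated error (nil if everything succeeded).
func setAll(items []string) error {
	var errs []error
	for _, item := range items {
		if err := set(item); err != nil {
			fmt.Printf("Failed to set %s: %v\n", item, err)
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func set(item string) error {
	if item == "bad" {
		return fmt.Errorf("cannot set %q", item)
	}
	return nil
}

func main() {
	fmt.Println(setAll([]string{"a", "bad", "b"}))
}
```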
@ -527,8 +536,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
return nil, err
}
case "writers-can-share":
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
if !f.isTeamDrive {
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
} else {
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
}
case "viewed-by-me":
// Can't write this


@ -644,7 +644,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// listSharedFoldersApi lists all available shared folders mounted and not mounted
// listSharedFolders lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false


@ -38,7 +38,7 @@ type dirPattern struct {
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatters is a slice of all the directory patterns
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.


@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@ -150,7 +149,7 @@ func (f *Fs) Root() string {
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
return fmt.Sprintf("hdfs://%s/%s", f.opt.Namenode, f.root)
}
// Features returns the optional features of this Fs
@ -210,7 +209,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
modTime: x.ModTime(),
})
}
}
return entries, nil


@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs


@ -1,7 +1,6 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test


@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9
// +build plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs


@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs


@ -89,6 +89,10 @@ that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}, {
Name: "no_escape",
Help: "Do not escape URL metacharacters in path names.",
Default: false,
}},
}
fs.Register(fsi)
@ -100,6 +104,7 @@ type Options struct {
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
NoEscape bool `config:"no_escape"`
}
// Fs stores the interface to the remote HTTP files
@ -326,6 +331,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Join's the remote onto the base URL
func (f *Fs) url(remote string) string {
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + remote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(remote)
}
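To make the effect of the new no_escape option concrete, here is a standalone sketch using only the standard library (note that rclone's rest.URLPathEscape escapes per path segment, so url.PathEscape, which also escapes the slash, is only an approximation; the endpoint and file name are made up):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	endpoint := "https://example.com/files/" // hypothetical base URL
	remote := "dir/a file #1.txt"

	// Default behaviour: URL metacharacters are escaped.
	fmt.Println(endpoint + url.PathEscape(remote))
	// https://example.com/files/dir%2Fa%20file%20%231.txt

	// With no_escape: the remote is concatenated verbatim.
	fmt.Println(endpoint + remote)
	// https://example.com/files/dir/a file #1.txt
}
```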


@ -67,13 +67,13 @@ func init() {
Sensitive: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Help: "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Help: "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.",
Provider: "digistorage",
IsPassword: true,
Required: true,


@ -36,7 +36,7 @@ import (
)
const (
maxEntitiesPerPage = 1024
maxEntitiesPerPage = 1000
minSleep = 200 * time.Millisecond
maxSleep = 2 * time.Second
pacerBurst = 1
@ -219,7 +219,8 @@ type listAllFn func(*entity) bool
// Search is a bit fussy about which characters match
//
// If the name doesn't match this then do an dir list instead
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ .]+$`)
// N.B.: Linkbox doesn't support search by name that is longer than 50 chars
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ -.]{1,50}$`)
// Lists the directory required calling the user function on each item found
//
@ -238,6 +239,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllF
// If name isn't good then do an unbounded search
name = ""
}
OUTER:
for numberOfEntities == maxEntitiesPerPage {
pageNumber++
@ -258,7 +260,6 @@ OUTER:
err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
if err != nil {
return false, fmt.Errorf("getting files failed: %w", err)
}
numberOfEntities = len(responseResult.SearchData.Entities)


@ -13,5 +13,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestLinkbox:",
NilObject: (*linkbox.Object)(nil),
// Linkbox doesn't support leading dots for files
SkipLeadingDot: true,
})
}


@ -1,5 +1,4 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux
package local
@ -24,9 +23,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(bs * int64(s.Blocks)), //nolint: unconvert // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), //nolint: unconvert // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), //nolint: unconvert // bytes which can be uploaded before reaching the quota
}
return usage, nil
}


@ -1,5 +1,4 @@
//go:build windows
// +build windows
package local


@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package local


@ -1,5 +1,4 @@
//go:build linux
// +build linux
package local


@ -1,5 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local


@ -1,5 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local


@ -36,6 +36,27 @@ const devUnset = 0xdeadbeefcafebabe // a d
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
const (
mTime timeType = iota
aTime
bTime
cTime
)
type timeTypeChoices struct{}
func (timeTypeChoices) Choices() []string {
return []string{
mTime: "mtime",
aTime: "atime",
bTime: "btime",
cTime: "ctime",
}
}
// Register with Fs
func init() {
fsi := &fs.RegInfo{
@ -213,6 +234,42 @@ when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: "time_type",
Help: `Set what kind of time is returned.
Normally rclone does all operations on the mtime or Modification time.
If you set this flag then rclone will return the Modified time as whatever
you set here. So if you use "rclone lsl --local-time-type ctime" then
you will see ctimes in the listing.
If the OS doesn't support returning the time_type specified then rclone
will silently replace it with the modification time which all OSes support.
- mtime is supported by all OSes
- atime is supported on all OSes except: plan9, js
- btime is only supported on: Windows, macOS, freebsd, netbsd
- ctime is supported on all Oses except: Windows, plan9, js
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
Default: mTime,
Advanced: true,
Examples: []fs.OptionExample{{
Value: mTime.String(),
Help: "The last modification time.",
}, {
Value: aTime.String(),
Help: "The last access time.",
}, {
Value: bTime.String(),
Help: "The creation time.",
}, {
Value: cTime.String(),
Help: "The last status change time.",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@ -237,6 +294,7 @@ type Options struct {
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
TimeType timeType `config:"time_type"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@ -1132,7 +1190,7 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
if !oldtime.Equal(readTime(file.o.fs.opt.TimeType, fi)) {
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
@ -1428,7 +1486,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
o.fs.objectMetaMu.Lock()
o.size = info.Size()
o.modTime = info.ModTime()
o.modTime = readTime(o.fs.opt.TimeType, info)
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// Read the size of the link.


@ -76,6 +76,24 @@ func TestUpdatingCheck(t *testing.T) {
}
// Test corrupted on transfer
// should error due to size/hash mismatch
func TestVerifyCopy(t *testing.T) {
t.Skip("FIXME this test is unreliable")
r := fstest.NewRun(t)
filePath := "sub dir/local test"
r.WriteFile(filePath, "some content", time.Now())
src, err := r.Flocal.NewObject(context.Background(), filePath)
require.NoError(t, err)
src.(*Object).fs.opt.NoCheckUpdated = true
for i := 0; i < 100; i++ {
go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
}
_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)
assert.Error(t, err)
}
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)


@ -1,16 +1,34 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atimespec.Unix())
case bTime:
return time.Unix(stat.Birthtimespec.Unix())
case cTime:
return time.Unix(stat.Ctimespec.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)
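For readers unfamiliar with the syscall plumbing in readTime above: Timespec.Unix() returns the (sec, nsec) pair that time.Unix expects, so each case is a one-line conversion. A minimal standalone sketch (field names as on darwin/freebsd/netbsd, matching the build tags above; on Linux the fields are Atim/Ctim instead, as the next file shows):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

func main() {
	fi, err := os.Stat("/etc/hosts") // any existing file will do
	if err != nil {
		panic(err)
	}
	stat, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		fmt.Println("no Stat_t available, falling back to mtime:", fi.ModTime())
		return
	}
	// Timespec.Unix() yields (sec int64, nsec int64) for time.Unix.
	fmt.Println("atime:", time.Unix(stat.Atimespec.Unix()))
	fmt.Println("ctime:", time.Unix(stat.Ctimespec.Unix()))
}
```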


@ -1,12 +1,13 @@
//go:build linux
// +build linux
package local
import (
"fmt"
"os"
"runtime"
"sync"
"syscall"
"time"
"github.com/rclone/rclone/fs"
@ -18,6 +19,22 @@ var (
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {


@ -1,14 +1,20 @@
//go:build plan9 || js
// +build plan9 js
//go:build dragonfly || plan9 || js
package local
import (
"fmt"
"os"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)


@ -1,16 +1,32 @@
//go:build openbsd || solaris
// +build openbsd solaris
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(nil, "didn't return Stat_t as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(stat.Atim.Unix())
case cTime:
return time.Unix(stat.Ctim.Unix())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)


@ -1,16 +1,32 @@
//go:build windows
// +build windows
package local
import (
"fmt"
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
)
// Read the time specified from the os.FileInfo
func readTime(t timeType, fi os.FileInfo) time.Time {
stat, ok := fi.Sys().(*syscall.Win32FileAttributeData)
if !ok {
fs.Debugf(nil, "didn't return Win32FileAttributeData as expected")
return fi.ModTime()
}
switch t {
case aTime:
return time.Unix(0, stat.LastAccessTime.Nanoseconds())
case bTime:
return time.Unix(0, stat.CreationTime.Nanoseconds())
}
return fi.ModTime()
}
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
info, err := o.fs.lstat(o.path)


@ -1,7 +1,6 @@
// Device reading functions
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local


@ -1,7 +1,6 @@
// Device reading functions
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local


@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package local


@ -1,18 +1,13 @@
//go:build windows
// +build windows
package local
import (
"os"
"syscall"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/windows"
)
const (
ERROR_SHARING_VIOLATION syscall.Errno = 32
)
// Removes name, retrying on a sharing violation
@ -28,7 +23,7 @@ func remove(name string) (err error) {
if !ok {
break
}
if pathErr.Err != ERROR_SHARING_VIOLATION {
if pathErr.Err != windows.ERROR_SHARING_VIOLATION {
break
}
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)


@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package local


@ -1,5 +1,4 @@
//go:build windows
// +build windows
package local


@ -1,5 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local


@ -1,5 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local


@ -1,5 +1,4 @@
//go:build !openbsd && !plan9
// +build !openbsd,!plan9
package local


@ -1,7 +1,7 @@
//go:build openbsd || plan9
// +build openbsd plan9
// The pkg/xattr module doesn't compile for openbsd or plan9
//go:build openbsd || plan9
package local
import "github.com/rclone/rclone/fs"


@ -38,8 +38,7 @@ func init() {
}
// Options defines the configuration for this backend
type Options struct {
}
type Options struct{}
// Fs represents a remote memory server
type Fs struct {
@ -297,7 +296,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
slash := strings.IndexRune(localPath, '/')
if slash >= 0 {
// send a directory if have a slash
dir := directory + localPath[:slash]
dir := strings.TrimPrefix(directory, f.rootDirectory+"/") + localPath[:slash]
if addBucket {
dir = path.Join(bucket, dir)
}
@ -385,10 +384,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
entries := fs.DirEntries{}
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return list.Add(entry)
err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
entries = append(entries, entry) // can't list.Add here -- could deadlock
return nil
})
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
return nil
}
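// Editor's note (assumption, not stated in the commit): collecting entries and
// calling list.Add only after f.list returns keeps the ListR callback from
// running while the backend is still mid-listing, so a fallback Purge that
// lists and deletes concurrently cannot wedge the walk on its own callback.
// The new testPurgeListDeadlock test below exercises exactly this scenario.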
if bucket == "" {
entries, err := f.listBuckets(ctx)
@ -482,7 +493,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if od == nil {
return nil, fs.ErrorObjectNotFound
}
buckets.updateObjectData(dstBucket, dstPath, od)
odCopy := *od
buckets.updateObjectData(dstBucket, dstPath, &odCopy)
return f.NewObject(ctx, remote)
}


@ -0,0 +1,40 @@
package memory
import (
"context"
"fmt"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/require"
)
var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PurgeListDeadlock", func(t *testing.T) {
testPurgeListDeadlock(t)
})
}
// test that Purge fallback does not result in deadlock from concurrently listing and removing
func testPurgeListDeadlock(t *testing.T) {
ctx := context.Background()
r := fstest.NewRunIndividual(t)
r.Mkdir(ctx, r.Fremote)
r.Fremote.Features().Disable("Purge") // force fallback-purge
// make a lot of files to prevent it from finishing too quickly
for i := 0; i < 100; i++ {
dst := "file" + fmt.Sprint(i) + ".txt"
r.WriteObject(ctx, dst, "hello", t1)
}
require.NoError(t, operations.Purge(ctx, r.Fremote, ""))
}
var _ fstests.InternalTester = (*Fs)(nil)


@ -443,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
URL := f.url(dir)
files, err := f.netStorageDirRequest(ctx, dir, URL)
files, err := f.netStorageDirRequest(ctx, URL)
if err != nil {
return nil, err
}
@ -932,7 +932,7 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
}
// netStorageDirRequest performs a NetStorage dir request
func (f *Fs) netStorageDirRequest(ctx context.Context, dir string, URL string) ([]File, error) {
func (f *Fs) netStorageDirRequest(ctx context.Context, URL string) ([]File, error) {
const actionHeader = "version=1&action=dir&format=xml&encoding=utf-8"
statResp := &Stat{}
if _, err := f.callBackend(ctx, URL, "GET", actionHeader, false, statResp, nil); err != nil {


@ -11,7 +11,9 @@ import (
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)
@ -432,17 +434,21 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
// processPermissions executes the add, update, and remove queues for writing permissions
func (m *Metadata) processPermissions(ctx context.Context, add, update, remove []*api.PermissionsType) (newPermissions []*api.PermissionsType, err error) {
errs := errcount.New()
for _, p := range remove { // remove (need to do these first because of remove + add workaround)
_, err := m.removePermission(ctx, p)
if err != nil {
return newPermissions, err
fs.Errorf(m.remote, "Failed to remove permission: %v", err)
errs.Add(err)
}
}
for _, p := range add { // add
newPs, _, err := m.addPermission(ctx, p)
if err != nil {
return newPermissions, err
fs.Errorf(m.remote, "Failed to add permission: %v", err)
errs.Add(err)
continue
}
newPermissions = append(newPermissions, newPs...)
}
@ -450,11 +456,17 @@ func (m *Metadata) processPermissions(ctx context.Context, add, update, remove [
for _, p := range update { // update
newP, _, err := m.updatePermission(ctx, p)
if err != nil {
return newPermissions, err
fs.Errorf(m.remote, "Failed to update permission: %v", err)
errs.Add(err)
continue
}
newPermissions = append(newPermissions, newP)
}
err = errs.Err("failed to set permissions")
if err != nil {
err = fserrors.NoRetryError(err)
}
return newPermissions, err
}
@ -613,7 +625,7 @@ func (o *Object) tryGetBtime(modTime time.Time) time.Time {
}
// adds metadata (except permissions) if --metadata is in use
func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, modTime time.Time) (createRequest api.CreateUploadRequest, err error) {
func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, modTime time.Time) (createRequest api.CreateUploadRequest, metadata fs.Metadata, err error) {
createRequest = api.CreateUploadRequest{ // we set mtime no matter what
Item: api.Metadata{
FileSystemInfo: &api.FileSystemInfoFacet{
@ -625,10 +637,10 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return createRequest, fmt.Errorf("failed to read metadata from source object: %w", err)
return createRequest, nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
if meta == nil {
return createRequest, nil // no metadata or --metadata not in use, so just return mtime
return createRequest, nil, nil // no metadata or --metadata not in use, so just return mtime
}
if o.meta == nil {
o.meta = o.fs.newMetadata(o.Remote())
@ -636,13 +648,13 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,
o.meta.mtime = modTime
numSet, err := o.meta.Set(ctx, meta)
if err != nil {
return createRequest, err
return createRequest, meta, err
}
if numSet == 0 {
return createRequest, nil
return createRequest, meta, nil
}
createRequest.Item = o.meta.toAPIMetadata()
return createRequest, nil
return createRequest, meta, nil
}
// Fetch metadata and update updateInfo if --metadata is in use
@ -665,27 +677,6 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
return newInfo, err
}
// Fetch and update permissions if --metadata is in use
// This is similar to fetchAndUpdateMetadata, except it does NOT set modtime or other metadata if there are no permissions to set.
// This is intended for cases where metadata may already have been set during upload and an extra step is needed only for permissions.
func (f *Fs) fetchAndUpdatePermissions(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
if meta == nil || !f.needsUpdatePermissions(meta) {
return nil, nil // no metadata, --metadata not in use, or wrong flags
}
if updateInfo.meta == nil {
updateInfo.meta = f.newMetadata(updateInfo.Remote())
}
newInfo, err := updateInfo.updateMetadata(ctx, meta)
if newInfo == nil {
return info, err
}
return newInfo, err
}
// updateMetadata calls Get, Set, and Write
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
_, err = o.meta.Get(ctx) // refresh permissions

View File

@ -4,11 +4,11 @@ differences between OneDrive Personal and Business (see table below for
details).
Permissions are also supported, if `--onedrive-metadata-permissions` is set. The
accepted values for `--onedrive-metadata-permissions` are `read`, `write`,
`read,write`, and `off` (the default). `write` supports adding new permissions,
accepted values for `--onedrive-metadata-permissions` are "`read`", "`write`",
"`read,write`", and "`off`" (the default). "`write`" supports adding new permissions,
updating the "role" of existing permissions, and removing permissions. Updating
and removing require the Permission ID to be known, so it is recommended to use
`read,write` instead of `write` if you wish to update/remove permissions.
"`read,write`" instead of "`write`" if you wish to update/remove permissions.
Permissions are read/written in JSON format using the same schema as the
[OneDrive API](https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online),
@ -92,31 +92,14 @@ an ObjectID can be provided in `User.ID`. At least one valid recipient must be
provided in order to add a permission for a user. Creating a Public Link is also
supported, if `Link.Scope` is set to `"anonymous"`.
Example request to add a "read" permission:
Example request to add a "read" permission with `--metadata-mapper`:
```json
[
{
"id": "",
"grantedTo": {
"user": {},
"application": {},
"device": {}
},
"grantedToIdentities": [
{
"user": {
"id": "ryan@contoso.com"
},
"application": {},
"device": {}
}
],
"roles": [
"read"
]
}
]
{
"Metadata": {
"permissions": "[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"
}
}
```
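
The docs above also note that a Public Link can be created by setting `Link.Scope` to `"anonymous"`. A hedged example of such a permission as it would appear in the `permissions` metadata value (field values here are illustrative, following the OneDrive API permission resource):

```json
[
  {
    "link": {
      "scope": "anonymous",
      "type": "view"
    },
    "roles": [
      "read"
    ]
  }
]
```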
Note that adding a permission can fail if a conflicting permission already

View File

@ -213,9 +213,11 @@ listing, set this option.`,
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
fall back to normal copy (which will be slightly slower).`,
This will work if you are copying between two OneDrive *Personal* drives AND the files to
copy are already shared between them. It will also work between a OneDrive for *Business*
drive and a *SharePoint* site under the *same tenant*, and between two *SharePoint* sites
under the *same tenant*, provided the user has access permissions for both. In other
cases, rclone will fall back to normal copy (which will be slightly slower).`,
Advanced: true,
}, {
Name: "list_chunk",
@ -289,7 +291,7 @@ all onedrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDriver Personal.
both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
@ -1591,14 +1593,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if f.driveType != srcObj.fs.driveType {
fs.Debugf(src, "Can't server-side copy - drive types differ")
return nil, fs.ErrorCantCopy
}
// For OneDrive Business, this is only supported within the same drive
if f.driveType != driveTypePersonal && srcObj.fs.driveID != f.driveID {
fs.Debugf(src, "Can't server-side copy - cross-drive but not OneDrive Personal")
if (f.driveType == driveTypePersonal && srcObj.fs.driveType != driveTypePersonal) || (f.driveType != driveTypePersonal && srcObj.fs.driveType == driveTypePersonal) {
fs.Debugf(src, "Can't server-side copy - cross-drive between OneDrive Personal and OneDrive for business (SharePoint)")
return nil, fs.ErrorCantCopy
} else if f.driveType == driveTypeBusiness && srcObj.fs.driveType == driveTypeBusiness && srcObj.fs.driveID != f.driveID {
fs.Debugf(src, "Can't server-side copy - cross-drive between difference OneDrive for business (Not SharePoint)")
return nil, fs.ErrorCantCopy
}
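
In effect, the rewritten check denies a server-side copy only when exactly one side is a Personal drive, or when two plain Business drives differ. A compact sketch of the resulting rule (a reading of the diff, not rclone's actual code; the drive-type strings are assumptions based on the Graph API values):

```go
package main

import "fmt"

// canServerSideCopy reports whether the rewritten Copy check above would
// attempt a server-side copy between a destination and a source drive.
func canServerSideCopy(dstType, srcType, dstID, srcID string) bool {
	const personal = "personal"
	const business = "business"
	if (dstType == personal) != (srcType == personal) {
		return false // Personal <-> Business/SharePoint is never possible
	}
	if dstType == business && srcType == business && dstID != srcID {
		return false // cross-drive Business <-> Business (not SharePoint)
	}
	return true // Personal <-> Personal, same Business drive, SharePoint pairs
}

func main() {
	fmt.Println(canServerSideCopy("personal", "personal", "a", "b"))        // true
	fmt.Println(canServerSideCopy("business", "personal", "a", "b"))        // false
	fmt.Println(canServerSideCopy("business", "business", "a", "b"))        // false
	fmt.Println(canServerSideCopy("documentLibrary", "business", "a", "b")) // true
}
```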
@ -2254,9 +2254,23 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
// Make a note of the redirect target as we need to call it without Auth
var redirectReq *http.Request
opts.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
req.Header.Del("Authorization") // remove Auth header
redirectReq = req
return http.ErrUseLastResponse
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if redirectReq != nil {
// It is a redirect which we are expecting
err = nil
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
@ -2267,6 +2281,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
return nil, err
}
if redirectReq != nil {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.unAuth.Do(redirectReq)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err
}
}
if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
// Overwrite size with actual size since size readings from OneDrive are unreliable.
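
The download path above captures the redirect target and replays it without the Authorization header, since the pre-signed download URL must be fetched unauthenticated. A self-contained sketch of that pattern with plain net/http (a generic illustration, not rclone's rest client; the URL is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func download(url, token string) (*http.Response, error) {
	var redirectReq *http.Request
	client := &http.Client{
		// Stop at the first redirect, remember it, and strip credentials
		// so the follow-up request is made unauthenticated.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			req.Header.Del("Authorization")
			redirectReq = req
			return http.ErrUseLastResponse
		},
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if redirectReq != nil {
		resp.Body.Close()
		// Replay the redirect with a credential-free client.
		return http.DefaultClient.Do(redirectReq)
	}
	return resp, nil
}

func main() {
	resp, err := download("https://example.com/content", "TOKEN") // hypothetical URL
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```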
@ -2276,11 +2304,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, modTime time.Time) (response *api.CreateUploadResponse, err error) {
func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, modTime time.Time) (response *api.CreateUploadResponse, metadata fs.Metadata, err error) {
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
createRequest, err := o.fetchMetadataForCreate(ctx, src, opts.Options, modTime)
createRequest, metadata, err := o.fetchMetadataForCreate(ctx, src, opts.Options, modTime)
if err != nil {
return nil, err
return nil, metadata, err
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@ -2293,7 +2321,7 @@ func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, mod
}
return shouldRetry(ctx, resp, err)
})
return response, err
return response, metadata, err
}
// getPosition gets the current position in a multipart upload
@ -2409,7 +2437,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(ctx, src, modTime)
session, metadata, err := o.createUploadSession(ctx, src, modTime)
if err != nil {
return nil, err
}
@ -2446,10 +2474,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
if err != nil {
return info, err
}
if !o.fs.opt.MetadataPermissions.IsSet(rwWrite) {
if metadata == nil || !o.fs.needsUpdatePermissions(metadata) {
return info, err
}
info, err = o.fs.fetchAndUpdatePermissions(ctx, src, options, o) // for permissions, which can't be set during original upload
info, err = o.updateMetadata(ctx, metadata) // for permissions, which can't be set during original upload
if info == nil {
return nil, err
}

View File

@ -323,6 +323,49 @@ func (f *Fs) TestServerSideCopyMove(t *testing.T, r *fstest.Run) {
f.compareMeta(t, expectedMeta, actualMeta, true)
}
// TestMetadataMapper tests adding permissions with the --metadata-mapper
func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
// setup
ctx, ci := fs.AddConfig(ctx)
ci.Metadata = true
_ = f.opt.MetadataPermissions.Set("read,write")
file1 := r.WriteFile(randomFilename(), content, t2)
const blob = `{"Metadata":{"permissions":"[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
// Copy
ci.MetadataMapper = []string{"echo", blob}
require.NoError(t, ci.Dump.Set("mapper"))
obj1, err := r.Flocal.NewObject(ctx, file1.Path)
assert.NoError(t, err)
obj2, err := operations.Copy(ctx, f, nil, randomFilename(), obj1)
assert.NoError(t, err)
actualMeta, err := fs.GetMetadata(ctx, obj2)
assert.NoError(t, err)
actualP := unmarshalPerms(t, actualMeta["permissions"])
found := false
foundCount := 0
for _, p := range actualP {
for _, identity := range p.GrantedToIdentities {
if identity.User.DisplayName == testUserID {
assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
found = true
foundCount++
}
}
if f.driveType == driveTypePersonal {
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) && p.GrantedTo.User.ID == testUserID { // shows up in a different place on biz vs. personal
assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
found = true
foundCount++
}
}
}
assert.True(t, found, fmt.Sprintf("no permission found with expected role (want: \n\n%v \n\ngot: \n\n%v\n\n)", blob, actualMeta))
assert.Equal(t, 1, foundCount, "expected to find exactly 1 match")
}
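
Outside the test harness, the behaviour this test exercises corresponds to a command along these lines (hypothetical paths and script name; the mapper program receives JSON on stdin and must print the amended JSON, like the blob above, on stdout):

```
rclone copyto local.txt onedrive:dir/file.txt \
  --metadata \
  --onedrive-metadata-permissions read,write \
  --metadata-mapper ./add-read-permission.sh
```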
// helper function to put an object with metadata and permissions
func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, perms []*api.PermissionsType) (expectedMeta, actualMeta fs.Metadata) {
t.Helper()
@ -459,6 +502,8 @@ func (f *Fs) InternalTest(t *testing.T) {
testF, r = newTestF()
t.Run("TestServerSideCopyMove", func(t *testing.T) { testF.TestServerSideCopyMove(t, r) })
testF.resetTestDefaults(r)
t.Run("TestMetadataMapper", func(t *testing.T) { testF.TestMetadataMapper(t, r) })
testF.resetTestDefaults(r)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
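
This and the following hunks drop the legacy `// +build` comment lines: since Go 1.17 the `//go:build` form is canonical, and once the module's minimum Go version understands it the old lines are redundant. After the change each constrained file needs only:

```go
//go:build !plan9 && !solaris && !js

package oracleobjectstorage
```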

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage

View File

@ -181,7 +181,7 @@ func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
return
}
// requestShare returns information about ssharable links
// requestShare returns information about sharable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",

View File

@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

View File

@ -1,7 +1,6 @@
// Test QingStor filesystem interface
//go:build !plan9 && !js
// +build !plan9,!js
package qingstor

View File

@ -2,6 +2,7 @@
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

View File

@ -1,7 +1,6 @@
// Upload object to QingStor
//go:build !plan9 && !js
// +build !plan9,!js
package qingstor

View File

@ -1,7 +1,6 @@
// Generate boilerplate code for setting similar structs from each other
//go:build ignore
// +build ignore
package main

View File

@ -289,6 +289,9 @@ func init() {
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.\nNeeds location constraint sa-east-1.",
}, {
Value: "il-central-1",
Help: "Israel (Tel Aviv) Region.\nNeeds location constraint il-central-1.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region.\nNeeds location constraint me-south-1.",
@ -1453,6 +1456,9 @@ func init() {
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region",
}, {
Value: "il-central-1",
Help: "Israel (Tel Aviv) Region",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region",
@ -3022,6 +3028,14 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkCopyCutoff(cs fs.SizeSuffix) error {
minCopySize := fs.SizeSuffixBase
if cs < minCopySize {
return fmt.Errorf("value is too small (%v is less than %v)", cs, minCopySize)
}
return nil
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
@ -3182,8 +3196,8 @@ func setQuirks(opt *Options) {
if opt.MaxUploadParts > 1000 {
opt.MaxUploadParts = 1000
}
urlEncodeListings = false
useAlreadyExists = false // untested
urlEncodeListings = true
useAlreadyExists = true
case "SeaweedFS":
listObjectsV2 = false // untested
virtualHostStyle = false
@ -3322,6 +3336,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
}
err = checkCopyCutoff(opt.CopyCutoff)
if err != nil {
return nil, fmt.Errorf("s3: --s3-copy-cutoff: %w", err)
}
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
}

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

View File

@ -1,7 +1,6 @@
// Test Sftp filesystem interface
//go:build !plan9
// +build !plan9
package sftp_test

View File

@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9
// +build plan9
// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

Some files were not shown because too many files have changed in this diff.