Implement WebDAV remote #580
This has special knowledge of Owncloud and Nextcloud to enable more functionality such as mod times.
This commit is contained in:
parent
bcdd79320b
commit
7f3dc9b5c4
@@ -31,6 +31,7 @@ Rclone is a command line program to sync files and directories to and from
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
@@ -38,6 +38,7 @@ docs = [
"swift.md",
"pcloud.md",
"sftp.md",
"webdav.md",
"yandex.md",
"local.md",
@@ -57,6 +57,7 @@ from various cloud storage systems and using file transfer services, such as:
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
@@ -29,14 +29,17 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
* {{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
* {{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
* {{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
* {{< provider name="Openstack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
* {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
* {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}}
* {{< provider name="Owncloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
* {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
* {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
* {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
* {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SFTP" config="/sftp/" >}}
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
* {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
* {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
* {{< provider name="The local filesystem" home="/local/" config="/local/" >}}
@@ -36,6 +36,7 @@ See the following for detailed instructions for
* [Pcloud](/pcloud/)
* [QingStor](/qingstor/)
* [SFTP](/sftp/)
* [WebDAV](/webdav/)
* [Yandex Disk](/yandex/)
* [The local filesystem](/local/)
@@ -33,6 +33,7 @@ Here is an overview of the major features of each cloud storage system.
| pCloud | MD5, SHA1 | Yes | No | No | W |
| QingStor | MD5 | No | No | No | R/W |
| SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - |
| WebDAV | - | Yes †† | Depends | No | - |
| Yandex Disk | MD5 | Yes | No | No | R/W |
| The local filesystem | All | Yes | Depends | No | - |
@@ -53,6 +54,8 @@ This is an SHA256 sum of all the 4MB block SHA256s.

‡ SFTP supports checksums if the same login has shell access and `md5sum`
or `sha1sum` as well as `echo` are in the remote's PATH.

†† WebDAV supports modtimes when used with Owncloud and Nextcloud only.

### ModTime ###

The cloud storage system supports setting modification times on
@@ -134,6 +137,7 @@ operations more efficient.
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No |
| QingStor | No | Yes | No | No | No | Yes | No |
| SFTP | No | No | Yes | Yes | No | No | Yes |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ |
| Yandex Disk | Yes | No | No | No | Yes | Yes | Yes |
| The local filesystem | Yes | No | Yes | Yes | No | No | Yes |
@@ -146,6 +150,8 @@ the directory.
markers but they don't actually have a quicker way of deleting files
other than deleting them individually.

‡ StreamUpload is not supported with Nextcloud

### Copy ###

Used when copying an object to and from the same remote. This known
148 docs/content/webdav.md Normal file
@@ -0,0 +1,148 @@
---
title: "WebDAV"
description: "Rclone docs for WebDAV"
date: "2017-10-01"
---

<i class="fa fa-globe"></i> WebDAV
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

To configure the WebDAV remote you will need to have a URL for it, and
a username and password. If you know what kind of system you are
connecting to then rclone can enable extra features.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
 1 / Amazon Drive
   \ "amazon cloud drive"
 2 / Amazon S3 (also Dreamhost, Ceph, Minio)
   \ "s3"
 3 / Backblaze B2
   \ "b2"
 4 / Box
   \ "box"
 5 / Dropbox
   \ "dropbox"
 6 / Encrypt/Decrypt a remote
   \ "crypt"
 7 / FTP Connection
   \ "ftp"
 8 / Google Cloud Storage (this is not Google Drive)
   \ "google cloud storage"
 9 / Google Drive
   \ "drive"
10 / Hubic
   \ "hubic"
11 / Local Disk
   \ "local"
12 / Microsoft Azure Blob Storage
   \ "azureblob"
13 / Microsoft OneDrive
   \ "onedrive"
14 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
   \ "swift"
15 / Pcloud
   \ "pcloud"
16 / QingClound Object Storage
   \ "qingstor"
17 / SSH/SFTP Connection
   \ "sftp"
18 / WebDAV
   \ "webdav"
19 / Yandex Disk
   \ "yandex"
20 / http Connection
   \ "http"
Storage> webdav
URL of http host to connect to
Choose a number from below, or type in your own value
 1 / Connect to example.com
   \ "https://example.com"
url> https://example.com/remote.php/webdav/
Name of the WebDAV site/service/software you are using
Choose a number from below, or type in your own value
 1 / Nextcloud
   \ "nextcloud"
 2 / Owncloud
   \ "owncloud"
 3 / Other site/service or software
   \ "other"
vendor> 1
User name
user> user
Password.
y) Yes type in my own password
g) Generate random password
n) No leave this optional password blank
y/g/n> y
Enter the password:
password:
Confirm the password:
password:
Remote config
--------------------
[remote]
url = https://example.com/remote.php/webdav/
vendor = nextcloud
user = user
pass = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Once configured you can then use `rclone` like this,

List directories in top level of your WebDAV

    rclone lsd remote:

List all the files in your WebDAV

    rclone ls remote:

To copy a local directory to a WebDAV directory called backup

    rclone copy /home/source remote:backup
### Modified time and hashes ###

Plain WebDAV does not support modified times. However, when used with
Owncloud or Nextcloud, rclone supports modified times.

Hashes are not supported.
### Owncloud ###

Click on the settings cog in the bottom right of the page and this
will show the WebDAV URL that rclone needs in the config step. It
will look something like `https://example.com/remote.php/webdav/`.

Owncloud supports modified times using the `X-OC-Mtime` header.
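For the curious, here is a minimal standalone sketch (illustrative only, not code from this commit) of what setting that header looks like with Go's standard library; the URL, file name and credentials are placeholders. The header value is seconds since the Unix epoch, in the same format the new backend sends:

```
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("file.txt") // placeholder local file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("PUT", "https://example.com/remote.php/webdav/backup/file.txt", f)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("user", "password") // placeholder credentials
	req.ContentLength = fi.Size()
	// Owncloud (and Nextcloud) take the desired modification time, in
	// seconds since the epoch, from this header on upload.
	req.Header.Set("X-OC-Mtime", fmt.Sprintf("%f", float64(fi.ModTime().UnixNano())/1e9))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
}
```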
### Nextcloud ###

This is configured in an identical way to Owncloud. Note that
Nextcloud does not support streaming of files (`rcat`) whereas
Owncloud does. This [may be
fixed](https://github.com/nextcloud/nextcloud-snap/issues/365) in the
future.
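Until that is fixed, uploads to Nextcloud need their size known up front (the backend sets an explicit Content-Length on uploads). As a generic illustration, not something this commit implements, an unknown-length stream can be spooled to a temporary file first so a length is available:

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// uploadWithKnownSize spools r to a temporary file so the PUT request
// can carry an accurate Content-Length, which Nextcloud requires.
func uploadWithKnownSize(url string, r io.Reader) error {
	tmp, err := os.CreateTemp("", "spool-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	size, err := io.Copy(tmp, r) // spool the unknown-length stream
	if err != nil {
		return err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return err
	}

	req, err := http.NewRequest("PUT", url, tmp)
	if err != nil {
		return err
	}
	req.ContentLength = size // authentication omitted for brevity
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
	return nil
}

func main() {
	// Placeholder URL; reads the stream from stdin.
	_ = uploadWithKnownSize("https://example.com/remote.php/webdav/file.bin", os.Stdin)
}
```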
@@ -66,6 +66,7 @@
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a></li>
<li><a href="/pcloud/"><i class="fa fa-cloud"></i> pCloud</a></li>
<li><a href="/sftp/"><i class="fa fa-server"></i> SFTP</a></li>
<li><a href="/webdav/"><i class="fa fa-server"></i> WebDAV</a></li>
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> The local filesystem</a></li>
</ul>
@@ -20,5 +20,6 @@ import (
	_ "github.com/ncw/rclone/s3"
	_ "github.com/ncw/rclone/sftp"
	_ "github.com/ncw/rclone/swift"
	_ "github.com/ncw/rclone/webdav"
	_ "github.com/ncw/rclone/yandex"
)
@@ -118,6 +118,11 @@ var (
		SubDir:   false,
		FastList: false,
	},
	{
		Name:     "TestWebdav:",
		SubDir:   false,
		FastList: false,
	},
}
binary = "fs.test"
// Flags
@@ -165,5 +165,6 @@ func main() {
	generateTestProgram(t, fns, "QingStor", buildConstraint("!plan9"))
	generateTestProgram(t, fns, "AzureBlob", buildConstraint("go1.7"))
	generateTestProgram(t, fns, "Pcloud")
	generateTestProgram(t, fns, "Webdav")
	log.Printf("Done")
}
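The `webdav/webdav_test.go` file added further down is produced by this generator (its header notes it is regenerated with `make gen_tests`). With a `TestWebdav:` remote configured as above, the generated suite can be run with the standard Go tooling, for example:

    go test ./webdav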
111 webdav/api/types.go Normal file
@@ -0,0 +1,111 @@
|
||||
// Package api has type definitions for webdav
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Wed, 27 Sep 2017 14:28:34 GMT
|
||||
timeFormat = time.RFC1123
|
||||
)
|
||||
|
||||
// Multistatus contains responses returned from an HTTP 207 return code
|
||||
type Multistatus struct {
|
||||
Responses []Response `xml:"response"`
|
||||
}
|
||||
|
||||
// Response contains an Href the response is about and its properties
|
||||
type Response struct {
|
||||
Href string `xml:"href"`
|
||||
Props Prop `xml:"propstat"`
|
||||
}
|
||||
|
||||
// Prop is the properties of a response
|
||||
type Prop struct {
|
||||
Status string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
}
|
||||
|
||||
// Parse a status of the form "HTTP/1.1 200 OK",
|
||||
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)\s+(.*)$`)
|
||||
|
||||
// StatusOK examines the Status and returns an OK flag
|
||||
func (p *Prop) StatusOK() bool {
|
||||
match := parseStatus.FindStringSubmatch(p.Status)
|
||||
if len(match) < 3 {
|
||||
return false
|
||||
}
|
||||
code, err := strconv.Atoi(match[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if code >= 200 && code < 300 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PropValue is a tagged name and value
|
||||
type PropValue struct {
|
||||
XMLName xml.Name `xml:""`
|
||||
Value string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// Error is used to describe webdav errors
|
||||
//
|
||||
// <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
|
||||
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
|
||||
// <s:message>File with name Photo could not be located</s:message>
|
||||
// </d:error>
|
||||
type Error struct {
|
||||
Exception string `xml:"exception,omitempty"`
|
||||
Message string `xml:"message,omitempty"`
|
||||
Status string
|
||||
StatusCode int
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
if e.Message != "" {
|
||||
return e.Message
|
||||
}
|
||||
if e.Exception != "" {
|
||||
return e.Exception
|
||||
}
|
||||
if e.Status != "" {
|
||||
return e.Status
|
||||
}
|
||||
return "Webdav Error"
|
||||
}
|
||||
|
||||
// Time represents date and time information for the
|
||||
// webdav API marshalling to and from timeFormat
|
||||
type Time time.Time
|
||||
|
||||
// MarshalXML turns a Time into XML
|
||||
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return e.EncodeElement(timeString, start)
|
||||
}
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
err := d.DecodeElement(&v, &start)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newT, err := time.Parse(timeFormat, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
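As a quick standalone illustration of these helpers (not part of the commit itself): `StatusOK` accepts any 2xx propstat status line, and `getlastmodified` values follow the RFC1123 layout shown in the comment next to `timeFormat`:

```
package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/webdav/api"
)

func main() {
	// A propstat status line as returned inside a 207 Multistatus response.
	p := api.Prop{Status: "HTTP/1.1 200 OK"}
	fmt.Println(p.StatusOK()) // true: any 2xx status counts as OK

	p.Status = "HTTP/1.1 404 Not Found"
	fmt.Println(p.StatusOK()) // false

	// getlastmodified values use the RFC1123 layout, e.g.:
	t, err := time.Parse(time.RFC1123, "Wed, 27 Sep 2017 14:28:34 GMT")
	fmt.Println(t.UTC(), err)
}
```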
900 webdav/webdav.go Normal file
@@ -0,0 +1,900 @@
|
||||
// Package webdav provides an interface to the Webdav
|
||||
// object storage system.
|
||||
package webdav
|
||||
|
||||
// Owncloud: Getting Oc-Checksum:
|
||||
// SHA1:f572d396fae9206628714fb2ce00f72e94f2258f on HEAD but not on
|
||||
// nextcloud?
|
||||
|
||||
// docs for file webdav
|
||||
// https://docs.nextcloud.com/server/12/developer_manual/client_apis/WebDAV/index.html
|
||||
|
||||
// indicates checksums can be set as metadata here
|
||||
// https://github.com/nextcloud/server/issues/6129
|
||||
// owncloud seems to have checksums as metadata though - can read them
|
||||
|
||||
// SetModTime might be possible
|
||||
// https://stackoverflow.com/questions/3579608/webdav-can-a-client-modify-the-mtime-of-a-file
|
||||
// ...support for a PROPSET to lastmodified (mind the missing get) which does the utime() call might be an option.
|
||||
// For example the ownCloud WebDAV server does it that way.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/pacer"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/ncw/rclone/webdav/api"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "webdav",
|
||||
Description: "Webdav",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Optional: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "vendor",
|
||||
Help: "Name of the Webdav site/service/software you are using",
|
||||
Optional: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "nextcloud",
|
||||
Help: "Nextcloud",
|
||||
}, {
|
||||
Value: "owncloud",
|
||||
Help: "Owncloud",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Other site/service or software",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name",
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Optional: true,
|
||||
IsPassword: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL // URL of the host
|
||||
endpointURL string // endpoint as a string
|
||||
srv *rest.Client // the connection to the webdav server
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
user string // username
|
||||
pass string // password
|
||||
vendor string // name of the vendor
|
||||
precision time.Duration // mod time precision
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
hasMetaData bool // whether info below has been set
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("webdav root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a webdav path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses a webdav 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
return
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
// FIXME how do we read back additional properties?
|
||||
opts := rest.Opts{
|
||||
Method: "PROPFIND",
|
||||
Path: f.filePath(path),
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read metadata failed")
|
||||
}
|
||||
if len(result.Responses) < 1 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
item := result.Responses[0]
|
||||
if !item.Props.StatusOK() {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if strings.HasSuffix(item.Href, "/") {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &item.Props, nil
|
||||
}
|
||||
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
func errorHandler(resp *http.Response) error {
|
||||
// Decode error response
|
||||
errResponse := new(api.Error)
|
||||
err := rest.DecodeXML(resp, &errResponse)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
||||
}
|
||||
errResponse.Status = resp.Status
|
||||
errResponse.StatusCode = resp.StatusCode
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// addSlash makes sure s is terminated with a / if non empty
|
||||
func addSlash(s string) string {
|
||||
if s != "" && !strings.HasSuffix(s, "/") {
|
||||
s += "/"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// filePath returns a file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
return rest.URLEscape(path.Join(f.root, file))
|
||||
}
|
||||
|
||||
// dirPath returns a directory path (f.root, dir)
|
||||
func (f *Fs) dirPath(dir string) string {
|
||||
return addSlash(f.filePath(dir))
|
||||
}
|
||||
|
||||
// filePath returns a file path (f.root, remote)
|
||||
func (o *Object) filePath() string {
|
||||
return o.fs.filePath(o.remote)
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
endpoint := fs.ConfigFileGet(name, "url")
|
||||
if !strings.HasSuffix(endpoint, "/") {
|
||||
endpoint += "/"
|
||||
}
|
||||
|
||||
user := fs.ConfigFileGet(name, "user")
|
||||
pass := fs.ConfigFileGet(name, "pass")
|
||||
if pass != "" {
|
||||
var err error
|
||||
pass, err = fs.Reveal(pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
}
|
||||
vendor := fs.ConfigFileGet(name, "vendor")
|
||||
|
||||
// Parse the endpoint
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
srv: rest.NewClient(fs.Config.Client()).SetRoot(u.String()).SetUserPass(user, pass),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
user: user,
|
||||
pass: pass,
|
||||
precision: fs.ModTimeNotSupported,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
f.setQuirks(vendor)
|
||||
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// setQuirks adjusts the Fs for the vendor passed in
|
||||
func (f *Fs) setQuirks(vendor string) {
|
||||
if vendor == "" {
|
||||
vendor = "other"
|
||||
}
|
||||
f.vendor = vendor
|
||||
switch vendor {
|
||||
case "owncloud":
|
||||
f.canStream = true
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
case "nextcloud":
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
case "other":
|
||||
default:
|
||||
fs.Debugf(f, "Unknown vendor %q", vendor)
|
||||
}
|
||||
|
||||
// Remove PutStream from optional features
|
||||
if !f.canStream {
|
||||
f.features.PutStream = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
var err error
|
||||
if info != nil {
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
//
|
||||
// If directories is set it only sends directories
|
||||
// User function to process a File item from listAll
|
||||
//
|
||||
// Should return true to finish processing
|
||||
type listAllFn func(string, bool, *api.Prop) bool
|
||||
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PROPFIND",
|
||||
Path: f.dirPath(dir), // FIXME Should not start with /
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return found, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return found, errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
//fmt.Printf("result = %#v", &result)
|
||||
baseURL, err := rest.URLJoin(f.endpoint, opts.Path)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "couldn't join URL")
|
||||
}
|
||||
for i := range result.Responses {
|
||||
item := &result.Responses[i]
|
||||
|
||||
// Collections must end in /
|
||||
isDir := strings.HasSuffix(item.Href, "/")
|
||||
|
||||
// Find name
|
||||
u, err := rest.URLJoin(baseURL, item.Href)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "URL Join failed for %q and %q: %v", baseURL, item.Href, err)
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(u.Path, baseURL.Path) {
|
||||
fs.Debugf(nil, "Item with unknown path received: %q, %q", item.Href, u.Path)
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, u.Path[len(baseURL.Path):])
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
|
||||
// the listing contains info about itself which we ignore
|
||||
if remote == dir {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check OK
|
||||
if !item.Props.StatusOK() {
|
||||
fs.Debugf(remote, "Ignoring item with bad status %q", item.Props.Status)
|
||||
continue
|
||||
}
|
||||
|
||||
if isDir {
|
||||
if filesOnly {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if directoriesOnly {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// item.Name = restoreReservedChars(item.Name)
|
||||
if fn(remote, isDir, &item.Props) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
var iErr error
|
||||
_, err = f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
if isDir {
|
||||
d := fs.NewDir(remote, time.Time(info.Modified))
|
||||
// .SetID(info.ID)
|
||||
// FIXME more info from dir? can set size, items?
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object) {
|
||||
// Temporary Object under construction
|
||||
o = &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: size,
|
||||
modTime: modTime,
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := f.createObject(src.Remote(), src.ModTime(), src.Size())
|
||||
return o, o.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
}
|
||||
|
||||
// mkParentDir makes the parent of the native path dirPath if
|
||||
// necessary and any directories above that
|
||||
func (f *Fs) mkParentDir(dirPath string) error {
|
||||
parent := path.Dir(dirPath)
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
}
|
||||
return f.mkdir(parent)
|
||||
}
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(dirPath string) error {
|
||||
// We assume the root is already created
|
||||
if dirPath == "" {
|
||||
return nil
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: dirPath,
|
||||
NoResponse: true,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// already exists
|
||||
if apiErr.StatusCode == http.StatusMethodNotAllowed {
|
||||
return nil
|
||||
}
|
||||
// parent does not exist
|
||||
if apiErr.StatusCode == http.StatusConflict {
|
||||
err = f.mkParentDir(dirPath)
|
||||
if err == nil {
|
||||
err = f.mkdir(dirPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
dirPath := f.dirPath(dir)
|
||||
return f.mkdir(dirPath)
|
||||
}
|
||||
|
||||
// dirNotEmpty returns true if the directory exists and is not Empty
|
||||
//
|
||||
// if the directory does not exist then err will be ErrorDirNotFound
|
||||
func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
|
||||
return f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// purgeCheck removes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
if check {
|
||||
notEmpty, err := f.dirNotEmpty(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if notEmpty {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: f.dirPath(dir),
|
||||
NoResponse: true,
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, nil)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rmdir failed")
|
||||
}
|
||||
// FIXME parse Multistatus response
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir deletes the root folder
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return f.purgeCheck(dir, true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return f.precision
|
||||
}
|
||||
|
||||
// Copy or Move src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy/fs.ErrorCantMove
|
||||
func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
if method == "COPY" {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDir(dstPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Copy mkParentDir failed")
|
||||
}
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: method,
|
||||
Path: srcObj.filePath(),
|
||||
NoResponse: true,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Destination": path.Join(f.endpoint.Path, dstPath),
|
||||
"Overwrite": "F",
|
||||
},
|
||||
}
|
||||
if f.useOCMtime {
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Copy call failed")
|
||||
}
|
||||
dstObj, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Copy NewObject failed")
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
return f.copyOrMove(src, remote, "COPY")
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return f.copyOrMove(src, remote, "MOVE")
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := srcFs.filePath(srcRemote)
|
||||
dstPath := f.filePath(dstRemote)
|
||||
|
||||
// Check if destination exists
|
||||
_, err := f.dirNotEmpty(dstRemote)
|
||||
if err == nil {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
if err != fs.ErrorDirNotFound {
|
||||
return errors.Wrap(err, "DirMove dirExists dst failed")
|
||||
}
|
||||
|
||||
// Make sure the parent directory exists
|
||||
err = f.mkParentDir(dstPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "MOVE",
|
||||
Path: addSlash(srcPath),
|
||||
NoResponse: true,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Destination": addSlash(path.Join(f.endpoint.Path, dstPath)),
|
||||
"Overwrite": "F",
|
||||
},
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove MOVE call failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashNone)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashSHA1 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
}
|
||||
return o.sha1, nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return 0
|
||||
}
|
||||
return o.size
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Prop) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.modTime = time.Time(info.Modified)
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetaData {
|
||||
return nil
|
||||
}
|
||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: o.filePath(),
|
||||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
err = o.fs.mkParentDir(o.filePath())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Update mkParentDir failed")
|
||||
}
|
||||
|
||||
size := src.Size()
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
NoResponse: true,
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
}
|
||||
if o.fs.useOCMtime {
|
||||
opts.ExtraHeaders = map[string]string{
|
||||
"X-OC-Mtime": fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9),
|
||||
}
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// read metadata from remote
|
||||
o.hasMetaData = false
|
||||
return o.readMetaData()
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: o.filePath(),
|
||||
NoResponse: true,
|
||||
}
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
73 webdav/webdav_test.go Normal file
@@ -0,0 +1,73 @@
|
||||
// Test Webdav filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package webdav_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/ncw/rclone/webdav"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*webdav.Object)(nil))
|
||||
fstests.RemoteName = "TestWebdav:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }