
azureblob: rework and complete #801

* Fixup bitrot (rclone and Azure library)
  * Implement Copy
  * Add modtime to metadata under mtime key as RFC3339Nano
  * Make multipart upload work
  * Make it pass the integration tests
  * Fix uploading of zero length blobs
  * Rename to azureblob as it seems likely we will do azurefile
  * Add docs
Nick Craig-Wood 2017-07-25 15:18:13 +01:00
parent 98d238daa4
commit 92d2e1f8d7
17 changed files with 1401 additions and 506 deletions


@@ -244,7 +244,7 @@ Getting going
* onedrive is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `fs/all/all.go`
* If web based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
Unit tests


@@ -25,6 +25,7 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* QingStor


@@ -1,468 +0,0 @@
// Package azure provides an interface to Microsoft Azure Blob Storage
package azure
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"os"
"path"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/ncw/rclone/fs"
)
const (
listChunkSize = 5000 // number of items to read at once
)
// Fs represents a remote Azure Blob Storage container
type Fs struct {
name string // the name of the remote
account string // name of the storage Account
container string // name of the Storage Account Container
root string // root path within the container
features *fs.Features // optional features
bc *storage.BlobStorageClient
cc *storage.Container
}
// Object describes an Azure blob object
type Object struct {
fs *Fs
remote string
blob *storage.Blob
}
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "azure",
Description: "Azure Blob Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "azure_account",
Help: "Azure Storage Account Name",
}, {
Name: "azure_account_key",
Help: "Azure Storage Account Key",
}, {
Name: "azure_container",
Help: "Azure Storage Account Blob Container",
}},
}
fs.Register(fsi)
}
//func azureParseUri(uri string) (account, container, root string, err error) {
// //https://hl37iyhcj646wshrd0.blob.core.windows.net/shared
// parts := matcher.FindStringSubmatch(uri)
// if parts == nil {
// err = errors.Errorf("couldn't parse account / container out of azure path %q", uri)
// } else {
// account, container, root = parts[1], parts[2], parts[3]
// root = strings.Trim(root, "/")
// }
// return
//}
func azureConnection(name, account, accountKey, container string) (*storage.BlobStorageClient, *storage.Container, error) {
client, err := storage.NewClient(account, accountKey, storage.DefaultBaseURL, "2016-05-31", true)
if err != nil {
return nil, nil, err
}
blobService := client.GetBlobService()
bc := &blobService
containerRef := bc.GetContainerReference(container)
cc := &containerRef
return bc, cc, nil
}
// sl appends a trailing slash to path if it doesn't already have one
func sl(path string) string {
if path == "" || path[len(path)-1:] != "/" {
return path + "/"
}
return path
}
// unsl removes the trailing slash from path if it has one
func unsl(path string) string {
if path != "" && path[len(path)-1:] == "/" {
return path[:len(path)-1]
}
return path
}
// NewFs constructs an Fs from the config for name, rooted at root
func NewFs(name, root string) (fs.Fs, error) {
account := fs.ConfigFileGet(name, "azure_account", os.Getenv("AZURE_ACCOUNT"))
accountKey := fs.ConfigFileGet(name, "azure_account_key", os.Getenv("AZURE_ACCOUNT_KEY"))
container := fs.ConfigFileGet(name, "azure_container", os.Getenv("AZURE_CONTAINER"))
bc, cc, err := azureConnection(name, account, accountKey, container)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
account: account,
container: container,
root: root,
bc: bc,
cc: cc,
}
if f.root != "" {
f.root = sl(f.root)
_, err := bc.GetBlobProperties(container, root)
if err == nil {
// exists !
f.root = path.Dir(root)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
return f, fs.ErrorIsFile
}
}
f.features = (&fs.Features{}).Fill(f)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Azure Blob Account %s container %s, directory %s", f.account, f.container, f.root)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Copy src to this remote using server side copy operations
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := f.bc.CopyBlob(f.container, f.root + remote, f.bc.GetBlobURL(f.container, srcObj.blob.Name))
if err != nil {
return nil, err
}
return f.NewObject(remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
type visitFunc func(remote string, blob *storage.Blob, isDirectory bool) error
func listInnerRecurse(f *Fs, out *fs.ListOpts, dir string, level int, visitor visitFunc) error {
dirWithRoot := f.root
if dir != "" {
dirWithRoot += dir + "/"
}
maxresults := uint(listChunkSize)
delimiter := "/"
if level == fs.MaxLevel {
return fs.ErrorLevelNotSupported
}
marker := ""
for {
resp, err := f.cc.ListBlobs(storage.ListBlobsParameters{
Prefix: dirWithRoot,
Delimiter: delimiter,
Marker: marker,
Include: "metadata",
MaxResults: maxresults,
Timeout: 100,
})
if err != nil {
return err
}
rootLength := len(f.root)
for i := range resp.Blobs {
// take the address of the slice element, not of the loop variable,
// so each visitor call gets a distinct pointer
blob := &resp.Blobs[i]
err := visitor(blob.Name[rootLength:], blob, false)
if err != nil {
return err
}
}
for _, blobPrefix := range resp.BlobPrefixes {
strippedDir := unsl(blobPrefix[rootLength:])
err := visitor(strippedDir, nil, true)
if err != nil {
return err
}
if level < (*out).Level() {
err := listInnerRecurse(f, out, strippedDir, level+1, visitor)
if err != nil {
return err
}
}
}
if resp.NextMarker != "" {
marker = resp.NextMarker
} else {
break
}
}
return nil
}
// List lists files and directories to out
func (f *Fs) List(out fs.ListOpts, dir string) {
defer out.Finished()
// List the objects and directories
listInnerRecurse(f, &out, dir, 1, func(remote string, blob *storage.Blob, isDirectory bool) error {
if isDirectory {
dir := &fs.Dir{
Name: remote,
Bytes: int64(0),
Count: 0,
}
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else {
o, err := f.newObjectWithInfo(remote, blob)
if err != nil {
return err
}
if out.Add(o) {
return fs.ErrorListAborted
}
}
return nil
})
return
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// copyBlob returns a copy of blob, deep copying the Metadata map
func copyBlob(blob *storage.Blob) *storage.Blob {
tmp := storage.Blob{}
tmp.Name = blob.Name
tmp.Properties.LastModified = blob.Properties.LastModified
tmp.Properties.Etag = blob.Properties.Etag
tmp.Properties.ContentMD5 = blob.Properties.ContentMD5
tmp.Properties.ContentLength = blob.Properties.ContentLength
tmp.Properties.ContentType = blob.Properties.ContentType
tmp.Properties.ContentEncoding = blob.Properties.ContentEncoding
tmp.Properties.CacheControl = blob.Properties.CacheControl
tmp.Properties.ContentLanguage = blob.Properties.ContentLanguage
tmp.Properties.BlobType = blob.Properties.BlobType
tmp.Properties.SequenceNumber = blob.Properties.SequenceNumber
tmp.Properties.CopyID = blob.Properties.CopyID
tmp.Properties.CopyStatus = blob.Properties.CopyStatus
tmp.Properties.CopySource = blob.Properties.CopySource
tmp.Properties.CopyProgress = blob.Properties.CopyProgress
tmp.Properties.CopyCompletionTime = blob.Properties.CopyCompletionTime
tmp.Properties.CopyStatusDescription = blob.Properties.CopyStatusDescription
tmp.Properties.LeaseStatus = blob.Properties.LeaseStatus
tmp.Properties.LeaseState = blob.Properties.LeaseState
// allocate the Metadata map first - writing to the nil map in a
// freshly constructed storage.Blob would panic
tmp.Metadata = make(map[string]string, len(blob.Metadata))
for k, v := range blob.Metadata {
tmp.Metadata[k] = v
}
return &tmp
}
// newObjectWithInfo creates an fs.Object from remote, using blob info if
// supplied. If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, blob *storage.Blob) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if blob != nil {
o.blob = copyBlob(blob)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// Put the Object into the container
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
// Temporary Object under construction
fso := &Object{
fs: f,
remote: src.Remote(),
}
return fso, fso.Update(in, src)
}
// Mkdir creates the container if it doesn't exist
//
// This is a no-op - the configured container is assumed to exist
func (f *Fs) Mkdir(dir string) error {
return nil
}
// Rmdir deletes the container if the fs is at the root
//
// This is a no-op - the container is never removed
func (f *Fs) Rmdir(dir string) error {
return nil
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the MD5 of an object as a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
}
dc, err := base64.StdEncoding.DecodeString(o.blob.Properties.ContentMD5)
if err != nil {
fs.Logf(o, "Cannot decode string: %s", err)
return "", err
}
return hex.EncodeToString(dc), nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.blob.Properties.ContentLength
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.blob != nil {
return nil
}
meta, err := o.fs.bc.GetBlobMetadata(o.fs.container, o.fs.root + o.remote)
if err != nil {
return err
}
props, err := o.fs.bc.GetBlobProperties(o.fs.container, o.fs.root + o.remote)
if err != nil {
return err
}
o.blob = copyBlob(&storage.Blob{Name: o.remote, Properties: *props, Metadata: meta})
return nil
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// o.blob is nil if readMetaData failed, so don't touch it
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
t, err := time.Parse(time.RFC1123, o.blob.Properties.LastModified)
if err != nil {
fs.Logf(o, "Failed to parse LastModified: %v", err)
return time.Now()
}
return t
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var readRange *string
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
readRange = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if readRange != nil {
return o.fs.bc.GetBlobRange(o.fs.container, o.fs.root + o.remote, *readRange, map[string]string{})
} else {
return o.fs.bc.GetBlob(o.fs.container, o.fs.root + o.remote)
}
}
// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
if size <= 64 * 1000 * 1000 {
err := o.fs.bc.CreateBlockBlobFromReader(o.fs.container, o.fs.root + o.remote, uint64(size), in, map[string]string{})
if err != nil {
return err
}
} else {
// multipart upload (create block, put block, put block list)
// isn't implemented yet, so reject large files
return fs.ErrorCantCopy
}
// Read the metadata from the newly created object
o.blob = nil // wipe old metadata
err := o.readMetaData()
return err
}
// Remove an object
func (o *Object) Remove() error {
return o.fs.bc.DeleteBlob(o.fs.container, o.fs.root + o.remote, map[string]string{})
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.blob.Properties.ContentType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

azureblob/azureblob.go (new file, 1107 lines)

File diff suppressed because it is too large


@@ -0,0 +1,76 @@
// Test AzureBlob filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
// +build go1.7
package azureblob_test
import (
"testing"
"github.com/ncw/rclone/azureblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*azureblob.Object)(nil))
fstests.RemoteName = "TestAzureBlob:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutUnknownLengthFile(t *testing.T) { fstests.TestFsPutUnknownLengthFile(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }


@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.7
package azureblob


@@ -32,6 +32,7 @@ docs = [
"drive.md",
"http.md",
"hubic.md",
"azureblob.md",
"onedrive.md",
"qingstor.md",
"swift.md",


@@ -51,6 +51,7 @@ from various cloud storage systems and using file transfer services, such as:
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* QingStor


@@ -23,6 +23,7 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* QingStor

docs/content/azureblob.md (new file, 159 lines)

@@ -0,0 +1,159 @@
---
title: "Microsoft Azure Blob Storage"
description: "Rclone docs for Microsoft Azure Blob Storage"
date: "2017-07-30"
---
<i class="fa fa-windows"></i> Microsoft Azure Blob Storage
-----------------------------------------
Paths are specified as `remote:container` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
Here is an example of making a Microsoft Azure Blob Storage
configuration for a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph, Minio)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Box
\ "box"
5 / Dropbox
\ "dropbox"
6 / Encrypt/Decrypt a remote
\ "crypt"
7 / FTP Connection
\ "ftp"
8 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
9 / Google Drive
\ "drive"
10 / Hubic
\ "hubic"
11 / Local Disk
\ "local"
12 / Microsoft Azure Blob Storage
\ "azureblob"
13 / Microsoft OneDrive
\ "onedrive"
14 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
15 / SSH/SFTP Connection
\ "sftp"
16 / Yandex Disk
\ "yandex"
17 / http Connection
\ "http"
Storage> azureblob
Storage Account Name
account> account_name
Storage Account Key
key> base64encodedkey==
Endpoint for the service - leave blank normally.
endpoint>
Remote config
--------------------
[remote]
account = account_name
key = base64encodedkey==
endpoint =
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
See all containers
rclone lsd remote:
Make a new container
rclone mkdir remote:container
List the contents of a container
rclone ls remote:container
Sync `/home/local/directory` to the remote container, deleting any excess
files in the container.
rclone sync /home/local/directory remote:container
### --fast-list ###
This remote supports `--fast-list` which allows you to use fewer
transactions in exchange for more memory. See the [rclone
docs](/docs/#fast-list) for more details.
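For example, to sync making use of it:
rclone sync --fast-list /home/local/directory remote:container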
### Modified time ###
The modified time is stored as metadata on the object with the `mtime`
key. It is stored in RFC3339 format with nanosecond
precision. The metadata is supplied during directory listings so
there is no overhead to using it.
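To illustrate the format (a minimal sketch with made-up helper names, not
the backend's actual code), writing and reading the `mtime` key might look
like this:
```
package main

import (
	"fmt"
	"time"
)

// encodeModTime stores t under the "mtime" metadata key in RFC3339
// format with nanosecond precision.
func encodeModTime(t time.Time) map[string]string {
	return map[string]string{"mtime": t.Format(time.RFC3339Nano)}
}

// decodeModTime parses the "mtime" metadata key back into a time.Time.
func decodeModTime(meta map[string]string) (time.Time, error) {
	return time.Parse(time.RFC3339Nano, meta["mtime"])
}

func main() {
	meta := encodeModTime(time.Now())
	fmt.Println(meta["mtime"]) // eg 2017-07-25T15:18:13.123456789+01:00
	t, err := decodeModTime(meta)
	fmt.Println(t, err)
}
```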
### Hashes ###
MD5 hashes are stored with small blobs. However, blobs that were
uploaded in chunks don't have MD5 hashes.
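Azure stores the MD5 as a base64 encoded Content-MD5 property, whereas
rclone reports hashes as lowercase hex, so a conversion is needed. A
sketch of that conversion (the helper name is made up):
```
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

// md5Base64ToHex converts Azure's base64 Content-MD5 into the lowercase
// hex form that rclone reports.
func md5Base64ToHex(b64 string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil
}

func main() {
	// base64 of the MD5 of the empty string
	h, err := md5Base64ToHex("1B2M2Y8AsgTpgAmY7PhCfg==")
	fmt.Println(h, err) // d41d8cd98f00b204e9800998ecf8427e <nil>
}
```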
### Multipart uploads ###
Rclone supports multipart uploads with Azure Blob storage. Files
bigger than 256MB will be uploaded using chunked upload by default.
The files will be uploaded in parallel in 4MB chunks (by default).
Note that these chunks are buffered in memory and there may be up to
`--transfers` of them being uploaded at once.
Files can't be split into more than 50,000 chunks, so by default the
largest file that can be uploaded with a 4MB chunk size is 195GB.
Above this rclone will double the chunk size until it creates fewer
than 50,000 chunks. By default this means a maximum file size of
3.2TB can be uploaded. This can be raised to 5TB using
`--azureblob-chunk-size 100M`.
Note that rclone doesn't commit the block list until the end of the
upload, which means that there is a limit of 9.5TB of multipart uploads
in progress, as Azure won't allow more than that amount of uncommitted
blocks.
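A sketch of the doubling rule (a made-up helper, not the backend's actual
code; only the 4MB default and the 50,000 block limit come from the text
above):
```
package main

import "fmt"

const (
	maxBlocks        = 50000           // Azure's limit on blocks per blob
	defaultChunkSize = 4 * 1024 * 1024 // rclone's 4MB default
)

// chunkSizeFor doubles the chunk size until fileSize fits within
// maxBlocks chunks.
func chunkSizeFor(fileSize int64) int64 {
	chunk := int64(defaultChunkSize)
	for (fileSize+chunk-1)/chunk > maxBlocks {
		chunk *= 2
	}
	return chunk
}

func main() {
	fmt.Println(chunkSizeFor(100<<30) >> 20) // 100GB fits in 4MB chunks: prints 4
	fmt.Println(chunkSizeFor(1<<40) >> 20)   // 1TB needs 32MB chunks: prints 32
}
```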
### Specific options ###
Here are the command line options specific to this cloud storage
system.
#### --azureblob-upload-cutoff=SIZE ####
Cutoff for switching to chunked upload - must be <= 256MB. The default
is 256MB.
#### --azureblob-chunk-size=SIZE ####
Upload chunk size. Default 4MB. Note that this is stored in memory
and there may be up to `--transfers` chunks stored at once in memory.
This can be at most 100MB.
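For example, with the default `--transfers 4` and
`--azureblob-chunk-size 100M`, chunk buffers alone could use on the
order of 400MB of memory.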
### Limitations ###
MD5 sums are only uploaded with chunked files if the source has an MD5
sum. This will always be the case for a local to Azure copy.


@@ -30,6 +30,7 @@ See the following for detailed instructions for
* [Google Drive](/drive/)
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
* [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [QingStor](/qingstor/)


@@ -15,24 +15,25 @@ show through.
Here is an overview of the major features of each cloud storage system.
| Name | Hash | ModTime | Case Insensitive | Duplicate Files | MIME Type |
| ---------------------- |:-------:|:-------:|:----------------:|:---------------:|:---------:|
| Amazon Drive | MD5 | No | Yes | No | R |
| Amazon S3 | MD5 | Yes | No | No | R/W |
| Backblaze B2 | SHA1 | Yes | No | No | R/W |
| Box | SHA1 | Yes | Yes | No | - |
| Dropbox | DBHASH †| Yes | Yes | No | - |
| FTP | - | No | No | No | - |
| Google Cloud Storage | MD5 | Yes | No | No | R/W |
| Google Drive | MD5 | Yes | No | Yes | R/W |
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 | Yes | Yes | No | R |
| Openstack Swift | MD5 | Yes | No | No | R/W |
| QingStor | - | No | No | No | R/W |
| SFTP | - | Yes | Depends | No | - |
| Yandex Disk | MD5 | Yes | No | No | R/W |
| The local filesystem | All | Yes | Depends | No | - |
| Name | Hash | ModTime | Case Insensitive | Duplicate Files | MIME Type |
| ---------------------------- |:-------:|:-------:|:----------------:|:---------------:|:---------:|
| Amazon Drive | MD5 | No | Yes | No | R |
| Amazon S3 | MD5 | Yes | No | No | R/W |
| Backblaze B2 | SHA1 | Yes | No | No | R/W |
| Box | SHA1 | Yes | Yes | No | - |
| Dropbox | DBHASH †| Yes | Yes | No | - |
| FTP | - | No | No | No | - |
| Google Cloud Storage | MD5 | Yes | No | No | R/W |
| Google Drive | MD5 | Yes | No | Yes | R/W |
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 | Yes | Yes | No | R |
| Openstack Swift | MD5 | Yes | No | No | R/W |
| QingStor | - | No | No | No | R/W |
| SFTP | - | Yes | Depends | No | - |
| Yandex Disk | MD5 | Yes | No | No | R/W |
| The local filesystem | All | Yes | Depends | No | - |
### Hash ###
@@ -111,24 +112,25 @@ All the remotes support a basic set of features, but there are some
optional features supported by some remotes used to make some
operations more efficient.
| Name | Purge | Copy | Move | DirMove | CleanUp | ListR |
| ---------------------- |:-----:|:----:|:----:|:-------:|:-------:|:-----:|
| Amazon Drive | Yes | No | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Amazon S3 | No | Yes | No | No | No | Yes |
| Backblaze B2 | No | No | No | No | Yes | Yes |
| Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| FTP | No | No | Yes | Yes | No | No |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes |
| Google Drive | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| HTTP | No | No | No | No | No | No |
| Hubic | Yes † | Yes | No | No | No | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Openstack Swift | Yes † | Yes | No | No | No | Yes |
| QingStor | No | Yes | No | No | No | Yes |
| SFTP | No | No | Yes | Yes | No | No |
| Yandex Disk | Yes | No | No | No | No [#575](https://github.com/ncw/rclone/issues/575) | Yes |
| The local filesystem | Yes | No | Yes | Yes | No | No |
| Name | Purge | Copy | Move | DirMove | CleanUp | ListR |
| ---------------------------- |:-----:|:----:|:----:|:-------:|:-------:|:-----:|
| Amazon Drive | Yes | No | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Amazon S3 | No | Yes | No | No | No | Yes |
| Backblaze B2 | No | No | No | No | Yes | Yes |
| Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| FTP | No | No | Yes | Yes | No | No |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes |
| Google Drive | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| HTTP | No | No | No | No | No | No |
| Hubic | Yes † | Yes | No | No | No | Yes |
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No |
| Openstack Swift | Yes † | Yes | No | No | No | Yes |
| QingStor | No | Yes | No | No | No | Yes |
| SFTP | No | No | Yes | Yes | No | No |
| Yandex Disk | Yes | No | No | No | No [#575](https://github.com/ncw/rclone/issues/575) | Yes |
| The local filesystem | Yes | No | Yes | Yes | No | No |
### Purge ###


@@ -60,6 +60,7 @@
<li><a href="/drive/"><i class="fa fa-google"></i> Google Drive</a></li>
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>
<li><a href="/qingstor/"><i class="fa fa-hdd-o"></i> QingStor</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a></li>


@@ -3,7 +3,7 @@ package all
import (
// Active file systems
_ "github.com/ncw/rclone/amazonclouddrive"
_ "github.com/ncw/rclone/azure"
_ "github.com/ncw/rclone/azureblob"
_ "github.com/ncw/rclone/b2"
_ "github.com/ncw/rclone/box"
_ "github.com/ncw/rclone/crypt"


@@ -48,6 +48,7 @@ var (
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
)
// RegInfo provides information about a filesystem


@@ -108,6 +108,11 @@ var (
SubDir: false,
FastList: false,
},
{
Name: "TestAzureBlob:",
SubDir: true,
FastList: true,
},
}
binary = "fs.test"
// Flags


@@ -163,5 +163,6 @@ func main() {
generateTestProgram(t, fns, "FTP")
generateTestProgram(t, fns, "Box")
generateTestProgram(t, fns, "QingStor", buildConstraint("!plan9"))
generateTestProgram(t, fns, "AzureBlob", buildConstraint("go1.7"))
log.Printf("Done")
}