diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c548aeef3..73d98be19 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -116,6 +116,52 @@ then run in that directory
 
     go run test_all.go
 
+## Code Organisation ##
+
+Rclone code is organised into a small number of top level directories
+with modules beneath.
+
+  * backend - the rclone backends for interfacing to cloud providers
+    * all - import this to load all the cloud providers
+    * ...providers
+  * bin - scripts for use while building or maintaining rclone
+  * cmd - the rclone commands
+    * all - import this to load all the commands
+    * ...commands
+  * docs - the documentation and website
+    * content - adjust these docs only - everything else is autogenerated
+  * fs - main rclone definitions - minimal amount of code
+    * accounting - bandwidth limiting and statistics
+    * asyncreader - an io.Reader which reads ahead
+    * config - manage the config file and flags
+    * driveletter - detect if a name is a drive letter
+    * filter - implements include/exclude filtering
+    * fserrors - rclone specific error handling
+    * fshttp - http handling for rclone
+    * fspath - path handling for rclone
+    * hash - defines rclone's hash types and functions
+    * list - list a remote
+    * log - logging facilities
+    * march - iterates directories in lock step
+    * object - in memory Fs objects
+    * operations - primitives for sync, eg Copy, Move
+    * sync - sync directories
+    * walk - walk a directory
+  * fstest - provides integration test framework
+    * fstests - integration tests for the backends
+    * mockdir - mocks an fs.Directory
+    * mockobject - mocks an fs.Object
+    * test_all - runs integration tests for everything
+  * graphics - the images used in the website etc.
+  * lib - libraries used by the backend
+    * dircache - directory ID to name caching
+    * oauthutil - helpers for using oauth
+    * pacer - retries with backoff and paces operations
+    * readers - a selection of useful io.Readers
+    * rest - a thin abstraction over net/http for REST
+  * vendor - 3rd party code managed by the dep tool
+  * vfs - Virtual FileSystem layer for implementing rclone mount and similar
+
 ## Writing Documentation ##
 
 If you are adding a new feature then please update the documentation.
@@ -240,10 +286,10 @@ Research
 
 Getting going
 
-  * Create `remote/remote.go` (copy this from a similar remote)
+  * Create `backend/remote/remote.go` (copy this from a similar remote)
     * box is a good one to start from if you have a directory based remote
     * b2 is a good one to start from if you have a bucket based remote
-  * Add your remote to the imports in `fs/all/all.go`
+  * Add your remote to the imports in `backend/all/all.go`
   * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
   * Try to implement as many optional methods as possible as it makes the remote more usable.
 
@@ -251,14 +297,14 @@ Unit tests
 
   * Create a config entry called `TestRemote` for the unit tests to use
   * Add your fs to the end of `fstest/fstests/gen_tests.go`
-  * generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
+  * Generate `backend/remote/remote_test.go` unit tests with `cd fstest/fstests; go generate`
   * Make sure all tests pass with `go test -v`
 
 Integration tests
 
-  * Add your fs to `fs/test_all.go`
+  * Add your fs to `fstest/test_all/test_all.go`
   * Make sure integration tests pass with
-      * `cd fs`
+      * `cd fs/operations`
       * `go test -v -remote TestRemote:`
   * If you are making a bucket based remote, then check with this also
       * `go test -v -remote TestRemote: -subdir`
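
The steps above all reference the new layout. For orientation, here is a minimal sketch of the registration boilerplate a new `backend/remote/remote.go` would carry, assuming only the `fs.RegInfo` fields visible in the backend diffs below; the `remote` name and `endpoint` key are placeholders, and the body is deliberately not a working backend:

```go
package remote

import (
	"errors"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",         // type name users select in their config
		Description: "Example remote", // shown by `rclone config`
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Client Id - leave blank normally.",
		}},
	})
}

// NewFs constructs an Fs from the path, remote:path
func NewFs(name, root string) (fs.Fs, error) {
	// Settings now come from the relocated config package.
	_ = config.FileGet(name, "endpoint")
	return nil, errors.New("sketch only - not implemented")
}
```
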
diff --git a/Makefile b/Makefile
index ec565f369..09fd7e35a 100644
--- a/Makefile
+++ b/Makefile
@@ -32,8 +32,9 @@ version:
 
 # Full suite of integration tests
 test:	rclone
+	go install github.com/ncw/rclone/fstest/test_all
 	-go test $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-	-cd fs && go run $(BUILDTAGS) test_all.go 2>&1 | tee test_all.log
+	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
 	@echo "Written logs in test.log and fs/test_all.log"
 
 # Quick test
diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
index ccb4a9aaf..cf98d4ba9 100644
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ b/backend/amazonclouddrive/amazonclouddrive.go
@@ -24,6 +24,11 @@ import (
 
 	"github.com/ncw/go-acd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
@@ -46,7 +51,7 @@ const (
 var (
 	// Flags
 	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-	uploadWaitPerGB   = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
+	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
 	// Description of how to auth for this app
 	acdConfig = &oauth2.Config{
 		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -73,20 +78,20 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Amazon Application Client Id - required.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Amazon Application Client Secret - required.",
 		}, {
-			Name: fs.ConfigAuthURL,
+			Name: config.ConfigAuthURL,
 			Help: "Auth server URL - leave blank to use Amazon's.",
 		}, {
-			Name: fs.ConfigTokenURL,
+			Name: config.ConfigTokenURL,
 			Help: "Token server url - leave blank to use Amazon's.",
 		}},
 	})
-	fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
+	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
 }
 
 // Fs represents a remote acd server
@@ -171,7 +176,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 			return true, err
 		}
 	}
-	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // If query parameters contain X-Amz-Algorithm remove Authorization header
@@ -193,7 +198,7 @@ func filterRequest(req *http.Request) {
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 	root = parsePath(root)
-	baseClient := fs.Config.Client()
+	baseClient := fshttp.NewClient(fs.Config)
 	if do, ok := baseClient.Transport.(interface {
 		SetRequestFilter(f func(req *http.Request))
 	}); ok {
@@ -212,7 +217,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		root:         root,
 		c:            c,
 		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
-		noAuthClient: fs.Config.Client(),
+		noAuthClient: fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
@@ -472,7 +477,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		if iErr != nil {
 			return nil, iErr
 		}
-		if fs.IsRetryError(err) {
+		if fserrors.IsRetryError(err) {
 			fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
 			continue
 		}
@@ -875,8 +880,8 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -932,9 +937,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
 		return *o.info.ContentProperties.Md5, nil
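
The pattern in this file repeats across every backend below: flag constructors move from `fs` to `fs/config/flags`, and retry classification moves to `fs/fserrors`. A condensed sketch of the new shape, using only calls shown in the hunks above (the flag name and error codes are illustrative):

```go
package example

import (
	"net/http"
	"time"

	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/fserrors"
)

// retryErrorCodes mirrors the lists the backends keep.
var retryErrorCodes = []int{429, 500, 502, 503, 504}

// Before the split this was fs.DurationP; flag helpers now live in flags.
var waitTime = flags.DurationP("example-wait", "", 180*time.Second, "Illustrative wait flag")

// shouldRetry follows the shape used by the backends above.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
```
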
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index b294eea3a..b878048ae 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -11,7 +11,7 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	"hash"
+	gohash "hash"
 	"io"
 	"net/http"
 	"path"
@@ -23,6 +23,12 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/storage"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/pkg/errors"
 )
@@ -66,8 +72,8 @@ func init() {
 		},
 		},
 	})
-	fs.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
-	fs.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
+	flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
+	flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
 }
 
 // Fs represents a remote azure server
@@ -165,7 +171,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 			}
 		}
 	}
-	return fs.ShouldRetry(err), err
+	return fserrors.ShouldRetry(err), err
 }
 
 // NewFs contstructs an Fs from the path, container:path
@@ -180,11 +186,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	account := fs.ConfigFileGet(name, "account")
+	account := config.FileGet(name, "account")
 	if account == "" {
 		return nil, errors.New("account not found")
 	}
-	key := fs.ConfigFileGet(name, "key")
+	key := config.FileGet(name, "key")
 	if key == "" {
 		return nil, errors.New("key not found")
 	}
@@ -193,13 +199,13 @@ func NewFs(name, root string) (fs.Fs, error) {
 		return nil, errors.Errorf("malformed storage account key: %v", err)
 	}
 
-	endpoint := fs.ConfigFileGet(name, "endpoint", storage.DefaultBaseURL)
+	endpoint := config.FileGet(name, "endpoint", storage.DefaultBaseURL)
 
 	client, err := storage.NewClient(account, key, endpoint, apiVersion, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make azure storage client")
 	}
-	client.HTTPClient = fs.Config.Client()
+	client.HTTPClient = fshttp.NewClient(fs.Config)
 	bc := client.GetBlobService()
 
 	f := &Fs{
@@ -473,7 +479,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.container == "" {
 		return fs.ErrorListBucketRequired
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.list(dir, true, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
@@ -622,8 +628,8 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // Purge deletes all the files and directories including the old versions.
@@ -690,9 +696,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	// Convert base64 encoded md5 into lower case hex
 	if o.md5 == "" {
@@ -834,7 +840,7 @@ type openFile struct {
 	o     *Object        // Object we are reading for
 	resp  *http.Response // response of the GET
 	body  io.Reader      // reading from here
-	hash  hash.Hash      // currently accumulating MD5
+	hash  gohash.Hash    // currently accumulating MD5
 	bytes int64          // number of bytes read on this connection
 	eof   bool           // whether we have read end of file
 }
@@ -1059,7 +1065,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	size := src.Size()
 	blob := o.getBlobWithModTime(src.ModTime())
 	blob.Properties.ContentType = fs.MimeType(o)
-	if sourceMD5, _ := src.Hash(fs.HashMD5); sourceMD5 != "" {
+	if sourceMD5, _ := src.Hash(hash.HashMD5); sourceMD5 != "" {
 		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
 		if err == nil {
 			blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)
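
Note the import alias: with rclone growing its own `fs/hash` package, the standard library's `hash` is renamed to `gohash` wherever both are needed. A self-contained sketch of the two coexisting (the `reader` type is illustrative, not from the source):

```go
package example

import (
	"crypto/md5"
	gohash "hash" // the standard library package, renamed

	"github.com/ncw/rclone/fs/hash" // rclone's own hash types
)

// openFile-style struct: the stdlib hasher lives under the aliased name.
type reader struct {
	h gohash.Hash // accumulates MD5 as bytes stream through
}

func newReader() *reader {
	return &reader{h: md5.New()}
}

// Hashes-style method body: rclone-level support uses the new package.
func supported() hash.Set {
	return hash.Set(hash.HashMD5)
}
```
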
diff --git a/backend/b2/api/types.go b/backend/b2/api/types.go
index bbb4e83b8..d8b1ff28b 100644
--- a/backend/b2/api/types.go
+++ b/backend/b2/api/types.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
 )
 
 // Error describes a B2 error response
@@ -29,7 +29,7 @@ func (e *Error) Fatal() bool {
 	return e.Status == 403 // 403 errors shouldn't be retried
 }
 
-var _ fs.Fataler = (*Error)(nil)
+var _ fserrors.Fataler = (*Error)(nil)
 
 // Account describes a B2 account
 type Account struct {
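
The `var _ fserrors.Fataler = (*Error)(nil)` line is a compile-time assertion: the build fails if `*Error` ever stops satisfying the interface, at zero runtime cost. The same idiom in isolation, with `fmt.Stringer` standing in for `fserrors.Fataler`:

```go
package example

import "fmt"

// Error is a stand-in for the api.Error type above.
type Error struct{ Message string }

func (e *Error) String() string { return e.Message }

// Compile-time check: the build breaks if *Error loses String().
var _ fmt.Stringer = (*Error)(nil)
```
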
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index 1648c51d5..dde5bc9f1 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -9,7 +9,7 @@ import (
 	"bytes"
 	"crypto/sha1"
 	"fmt"
-	"hash"
+	gohash "hash"
 	"io"
 	"net/http"
 	"path"
@@ -21,6 +21,13 @@ import (
 
 	"github.com/ncw/rclone/backend/b2/api"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
@@ -48,9 +55,9 @@ var (
 	minChunkSize       = fs.SizeSuffix(5E6)
 	chunkSize          = fs.SizeSuffix(96 * 1024 * 1024)
 	uploadCutoff       = fs.SizeSuffix(200E6)
-	b2TestMode         = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
-	b2Versions         = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
-	b2HardDelete       = fs.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
+	b2TestMode         = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
+	b2Versions         = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
+	b2HardDelete       = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
 	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
 )
 
@@ -72,8 +79,8 @@ func init() {
 		},
 		},
 	})
-	fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
-	fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
+	flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
+	flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
 }
 
 // Fs represents a remote b2 server
@@ -186,7 +193,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
 		}
 		return true, err
 	}
-	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // shouldRetry returns a boolean as to whether this resp and err
@@ -236,15 +243,15 @@ func NewFs(name, root string) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	account := fs.ConfigFileGet(name, "account")
+	account := config.FileGet(name, "account")
 	if account == "" {
 		return nil, errors.New("account not found")
 	}
-	key := fs.ConfigFileGet(name, "key")
+	key := config.FileGet(name, "key")
 	if key == "" {
 		return nil, errors.New("key not found")
 	}
-	endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
+	endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
 	f := &Fs{
 		name:         name,
 		bucket:       bucket,
@@ -252,7 +259,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		account:      account,
 		key:          key,
 		endpoint:     endpoint,
-		srv:          rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
+		srv:          rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
 		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		bufferTokens: make(chan []byte, fs.Config.Transfers),
 	}
@@ -615,7 +622,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.bucket == "" {
 		return fs.ErrorListBucketRequired
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	last := ""
 	err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
@@ -868,16 +875,16 @@ func (f *Fs) purge(oldOnly bool) error {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
-				fs.Stats.Checking(object.Name)
+				accounting.Stats.Checking(object.Name)
 				checkErr(f.deleteByID(object.ID, object.Name))
-				fs.Stats.DoneChecking(object.Name)
+				accounting.Stats.DoneChecking(object.Name)
 			}
 		}()
 	}
 	last := ""
 	checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
 		if !isDirectory {
-			fs.Stats.Checking(remote)
+			accounting.Stats.Checking(remote)
 			if oldOnly && last != remote {
 				if object.Action == "hide" {
 					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
@@ -890,7 +897,7 @@ func (f *Fs) purge(oldOnly bool) error {
 				toBeDeleted <- object
 			}
 			last = remote
-			fs.Stats.DoneChecking(remote)
+			accounting.Stats.DoneChecking(remote)
 		}
 		return nil
 	}))
@@ -914,8 +921,8 @@ func (f *Fs) CleanUp() error {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashSHA1)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashSHA1)
 }
 
 // ------------------------------------------------------------
@@ -939,9 +946,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Sha-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashSHA1 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashSHA1 {
+		return "", hash.ErrHashUnsupported
 	}
 	if o.sha1 == "" {
 		// Error is logged in readMetaData
@@ -1094,7 +1101,7 @@ type openFile struct {
 	o     *Object        // Object we are reading for
 	resp  *http.Response // response of the GET
 	body  io.Reader      // reading from here
-	hash  hash.Hash      // currently accumulating SHA1
+	hash  gohash.Hash    // currently accumulating SHA1
 	bytes int64          // number of bytes read on this connection
 	eof   bool           // whether we have read end of file
 }
@@ -1279,7 +1286,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 	modTime := src.ModTime()
 
-	calculatedSha1, _ := src.Hash(fs.HashSHA1)
+	calculatedSha1, _ := src.Hash(hash.HashSHA1)
 	if calculatedSha1 == "" {
 		calculatedSha1 = "hex_digits_at_end"
 		har := newHashAppendingReader(in, sha1.New())
diff --git a/backend/b2/upload.go b/backend/b2/upload.go
index f7d6234fc..6e88055e3 100644
--- a/backend/b2/upload.go
+++ b/backend/b2/upload.go
@@ -9,19 +9,21 @@ import (
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
-	"hash"
+	gohash "hash"
 	"io"
 	"strings"
 	"sync"
 
 	"github.com/ncw/rclone/backend/b2/api"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
 )
 
 type hashAppendingReader struct {
-	h         hash.Hash
+	h         gohash.Hash
 	in        io.Reader
 	hexSum    string
 	hexReader io.Reader
@@ -58,7 +60,7 @@ func (har *hashAppendingReader) HexSum() string {
 // newHashAppendingReader takes a Reader and a Hash and will append the hex sum
 // after the original reader reaches EOF. The increased size depends on the
 // given hash, which may be queried through AdditionalLength()
-func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
+func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
 	withHash := io.TeeReader(in, h)
 	return &hashAppendingReader{h: h, in: withHash}
 }
@@ -113,7 +115,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 		},
 	}
 	// Set the SHA1 if known
-	if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
+	if calculatedSha1, err := src.Hash(hash.HashSHA1); err == nil && calculatedSha1 != "" {
 		request.Info[sha1Key] = calculatedSha1
 	}
 	var response api.StartLargeFileResponse
@@ -219,7 +221,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 		opts := rest.Opts{
 			Method:  "POST",
 			RootURL: upload.UploadURL,
-			Body:    fs.AccountPart(up.o, in),
+			Body:    accounting.AccountPart(up.o, in),
 			ExtraHeaders: map[string]string{
 				"Authorization":    upload.AuthorizationToken,
 				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
@@ -329,7 +331,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
 	errs := make(chan error, 1)
 	hasMoreParts := true
 	var wg sync.WaitGroup
-	fs.AccountByPart(up.o) // Cancel whole file accounting before reading
+	accounting.AccountByPart(up.o) // Cancel whole file accounting before reading
 
 	// Transfer initial chunk
 	up.size = int64(len(initialUploadBlock))
@@ -390,7 +392,7 @@ func (up *largeUpload) Upload() error {
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 	var err error
-	fs.AccountByPart(up.o) // Cancel whole file accounting before reading
+	accounting.AccountByPart(up.o) // Cancel whole file accounting before reading
 outer:
 	for part := int64(1); part <= up.parts; part++ {
 		// Check any errors
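
For context on `hashAppendingReader`: B2 accepts `hex_digits_at_end` when the SHA1 is not known up front, so the reader streams the body and then its hex digest. A usage sketch, written as if inside this package since the constructor is unexported:

```go
package b2

import (
	"crypto/sha1"
	"io"
	"io/ioutil"
)

func sketchUpload(in io.Reader) (string, error) {
	har := newHashAppendingReader(in, sha1.New())
	// Reading har yields the body followed by its hex SHA1, which is what
	// B2 expects when the checksum is declared as "hex_digits_at_end".
	if _, err := io.Copy(ioutil.Discard, har); err != nil {
		return "", err
	}
	// After EOF the digest is available for verification.
	return har.HexSum(), nil
}
```
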
diff --git a/backend/box/box.go b/backend/box/box.go
index 029454633..cb89f8cb5 100644
--- a/backend/box/box.go
+++ b/backend/box/box.go
@@ -22,9 +22,11 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/backend/box/api"
-	"github.com/ncw/rclone/box/api"
-	"github.com/ncw/rclone/dircache"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
@@ -56,7 +58,7 @@ var (
 			TokenURL: "https://app.box.com/api/oauth2/token",
 		},
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
 	}
 	uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
@@ -75,14 +77,14 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Box App Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Box App Client Secret - leave blank normally.",
 		}},
 	})
-	fs.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
+	flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
 }
 
 // Fs represents a remote box
@@ -160,7 +162,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 		authRety = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
-	return authRety || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // substitute reserved characters for box
@@ -827,8 +829,8 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashSHA1)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashSHA1)
 }
 
 // ------------------------------------------------------------
@@ -857,9 +859,9 @@ func (o *Object) srvPath() string {
 }
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashSHA1 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashSHA1 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.sha1, nil
 }
diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index a2a680440..874fe4975 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -18,6 +18,10 @@ import (
 
 	"github.com/ncw/rclone/backend/crypt"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/time/rate"
@@ -47,18 +51,18 @@ const (
 // Globals
 var (
 	// Flags
-	cacheDbPath             = fs.StringP("cache-db-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cache DB")
-	cacheChunkPath          = fs.StringP("cache-chunk-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cached chunk files")
-	cacheDbPurge            = fs.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
-	cacheChunkSize          = fs.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
-	cacheTotalChunkSize     = fs.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
-	cacheChunkCleanInterval = fs.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
-	cacheInfoAge            = fs.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
-	cacheReadRetries        = fs.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
-	cacheTotalWorkers       = fs.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
-	cacheChunkNoMemory      = fs.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
-	cacheRps                = fs.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
-	cacheStoreWrites        = fs.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
+	cacheDbPath             = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
+	cacheChunkPath          = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
+	cacheDbPurge            = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
+	cacheChunkSize          = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
+	cacheTotalChunkSize     = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
+	cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
+	cacheInfoAge            = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
+	cacheReadRetries        = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
+	cacheTotalWorkers       = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
+	cacheChunkNoMemory      = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
+	cacheRps                = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
+	cacheStoreWrites        = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
 )
 
 // Register with Fs
@@ -223,7 +227,7 @@ type Fs struct {
 
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, rpath string) (fs.Fs, error) {
-	remote := fs.ConfigFileGet(name, "remote")
+	remote := config.FileGet(name, "remote")
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
 	}
@@ -235,10 +239,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	}
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
 
-	plexURL := fs.ConfigFileGet(name, "plex_url")
-	plexToken := fs.ConfigFileGet(name, "plex_token")
+	plexURL := config.FileGet(name, "plex_url")
+	plexToken := config.FileGet(name, "plex_token")
 	var chunkSize fs.SizeSuffix
-	chunkSizeString := fs.ConfigFileGet(name, "chunk_size", DefCacheChunkSize)
+	chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
 	if *cacheChunkSize != DefCacheChunkSize {
 		chunkSizeString = *cacheChunkSize
 	}
@@ -247,7 +251,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to understand chunk size", chunkSizeString)
 	}
 	var chunkTotalSize fs.SizeSuffix
-	chunkTotalSizeString := fs.ConfigFileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
+	chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
 	if *cacheTotalChunkSize != DefCacheTotalChunkSize {
 		chunkTotalSizeString = *cacheTotalChunkSize
 	}
@@ -260,7 +264,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
 	}
-	infoAge := fs.ConfigFileGet(name, "info_age", DefCacheInfoAge)
+	infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
 	if *cacheInfoAge != DefCacheInfoAge {
 		infoAge = *cacheInfoAge
 	}
@@ -301,10 +305,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
 			}
 		} else {
-			plexUsername := fs.ConfigFileGet(name, "plex_username")
-			plexPassword := fs.ConfigFileGet(name, "plex_password")
+			plexUsername := config.FileGet(name, "plex_username")
+			plexPassword := config.FileGet(name, "plex_password")
 			if plexPassword != "" && plexUsername != "" {
-				decPass, err := fs.Reveal(plexPassword)
+				decPass, err := config.Reveal(plexPassword)
 				if err != nil {
 					decPass = plexPassword
 				}
@@ -319,8 +323,8 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	dbPath := *cacheDbPath
 	chunkPath := *cacheChunkPath
 	// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-	if dbPath != filepath.Join(fs.CacheDir, "cache-backend") &&
-		chunkPath == filepath.Join(fs.CacheDir, "cache-backend") {
+	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
+		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
 		chunkPath = dbPath
 	}
 	if filepath.Ext(dbPath) != "" {
@@ -506,7 +510,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	return cachedEntries, nil
 }
 
-func (f *Fs) recurse(dir string, list *fs.ListRHelper) error {
+func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(dir)
 	if err != nil {
 		return err
@@ -558,7 +562,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	}
 
 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(dir, list)
 	if err != nil {
 		return err
@@ -895,7 +899,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
+func (f *Fs) Hashes() hash.Set {
 	return f.Fs.Hashes()
 }
 
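
The cache backend's `recurse` shows the new `fs/walk` listing helper. A sketch of the whole pattern in one function; `NewListRHelper` appears in the hunks, while the `Add` and `Flush` methods are my assumption about the helper's API (it batches entries before invoking the callback):

```go
package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

func listRecursively(f fs.Fs, dir string, callback fs.ListRCallback) error {
	list := walk.NewListRHelper(callback)
	var recurse func(dir string) error
	recurse = func(dir string) error {
		entries, err := f.List(dir)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			if err := list.Add(entry); err != nil { // assumed method
				return err
			}
			if d, ok := entry.(fs.Directory); ok {
				if err := recurse(d.Remote()); err != nil {
					return err
				}
			}
		}
		return nil
	}
	if err := recurse(dir); err != nil {
		return err
	}
	return list.Flush() // assumed: deliver any remaining batched entries
}
```
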
diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go
index 9ad29587f..f9523b62e 100644
--- a/backend/cache/cache_internal_test.go
+++ b/backend/cache/cache_internal_test.go
@@ -20,6 +20,8 @@ import (
 	//"strings"
 
 	"github.com/ncw/rclone/backend/cache"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/object"
 	//"github.com/ncw/rclone/cmd/mount"
 	//_ "github.com/ncw/rclone/cmd/cmount"
 	//"github.com/ncw/rclone/cmd/mountlib"
@@ -492,7 +494,7 @@ func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object
 func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
 	in := bytes.NewReader(data)
 	modTime := time.Now()
-	objInfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
+	objInfo := object.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
 
 	obj, err := f.Put(in, objInfo)
 	require.NoError(t, err)
@@ -503,8 +505,8 @@ func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Obje
 func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
 	in1 := bytes.NewReader(data1)
 	in2 := bytes.NewReader(data2)
-	objInfo1 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-	objInfo2 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
 
 	obj, err := f.Put(in1, objInfo1)
 	require.NoError(t, err)
@@ -540,15 +542,15 @@ func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 
 func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
-	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
-	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
+	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
+	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
 	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
 	localExists := false
 	cacheExists := false
 	cryptExists := false
-	for _, s := range fs.ConfigFileSections() {
+	for _, s := range config.FileSections() {
 		if s == localRemote {
 			localExists = true
 		}
@@ -563,28 +565,28 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st
 	localRemoteWrap := ""
 	if !localExists {
 		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		fs.ConfigFileSet(localRemote, "type", "local")
-		fs.ConfigFileSet(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 	}
 
 	if !cacheExists {
-		fs.ConfigFileSet(cacheRemote, "type", "cache")
-		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
+		config.FileSet(cacheRemote, "type", "cache")
+		config.FileSet(cacheRemote, "remote", localRemoteWrap)
 	}
 	if c, ok := cfg["chunk_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
+		config.FileSet(cacheRemote, "chunk_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
+		config.FileSet(cacheRemote, "chunk_size", "1m")
 	}
 	if c, ok := cfg["chunk_total_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
+		config.FileSet(cacheRemote, "chunk_total_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
+		config.FileSet(cacheRemote, "chunk_total_size", "2m")
 	}
 	if c, ok := cfg["info_age"]; ok {
-		fs.ConfigFileSet(cacheRemote, "info_age", c)
+		config.FileSet(cacheRemote, "info_age", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
+		config.FileSet(cacheRemote, "info_age", infoAge.String())
 	}
 
 	if !cryptExists {
@@ -627,14 +629,14 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st
 
 func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
-	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
-	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
+	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
+	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
 	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
 	localExists := false
 	cacheExists := false
-	for _, s := range fs.ConfigFileSections() {
+	for _, s := range config.FileSections() {
 		if s == localRemote {
 			localExists = true
 		}
@@ -646,28 +648,28 @@ func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[stri
 	localRemoteWrap := ""
 	if !localExists {
 		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		fs.ConfigFileSet(localRemote, "type", "local")
-		fs.ConfigFileSet(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 	}
 
 	if !cacheExists {
-		fs.ConfigFileSet(cacheRemote, "type", "cache")
-		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
+		config.FileSet(cacheRemote, "type", "cache")
+		config.FileSet(cacheRemote, "remote", localRemoteWrap)
 	}
 	if c, ok := cfg["chunk_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
+		config.FileSet(cacheRemote, "chunk_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
+		config.FileSet(cacheRemote, "chunk_size", "1m")
 	}
 	if c, ok := cfg["chunk_total_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
+		config.FileSet(cacheRemote, "chunk_total_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
+		config.FileSet(cacheRemote, "chunk_total_size", "2m")
 	}
 	if c, ok := cfg["info_age"]; ok {
-		fs.ConfigFileSet(cacheRemote, "info_age", c)
+		config.FileSet(cacheRemote, "info_age", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
+		config.FileSet(cacheRemote, "info_age", infoAge.String())
 	}
 
 	if c, ok := cfg["cache-chunk-no-memory"]; ok {
diff --git a/backend/cache/object.go b/backend/cache/object.go
index 184730b31..9de7b3b7d 100644
--- a/backend/cache/object.go
+++ b/backend/cache/object.go
@@ -13,21 +13,22 @@ import (
 	"strconv"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
 )
 
 // Object is a generic file like object that stores basic information about it
 type Object struct {
 	fs.Object `json:"-"`
 
-	CacheFs       *Fs                    `json:"-"`        // cache fs
-	Name          string                 `json:"name"`     // name of the directory
-	Dir           string                 `json:"dir"`      // abs path of the object
-	CacheModTime  int64                  `json:"modTime"`  // modification or creation time - IsZero for unknown
-	CacheSize     int64                  `json:"size"`     // size of directory and contents or -1 if unknown
-	CacheStorable bool                   `json:"storable"` // says whether this object can be stored
-	CacheType     string                 `json:"cacheType"`
-	CacheTs       time.Time              `json:"cacheTs"`
-	cacheHashes   map[fs.HashType]string // all supported hashes cached
+	CacheFs       *Fs                  `json:"-"`        // cache fs
+	Name          string               `json:"name"`     // name of the directory
+	Dir           string               `json:"dir"`      // abs path of the object
+	CacheModTime  int64                `json:"modTime"`  // modification or creation time - IsZero for unknown
+	CacheSize     int64                `json:"size"`     // size of directory and contents or -1 if unknown
+	CacheStorable bool                 `json:"storable"` // says whether this object can be stored
+	CacheType     string               `json:"cacheType"`
+	CacheTs       time.Time            `json:"cacheTs"`
+	cacheHashes   map[hash.Type]string // all supported hashes cached
 
 	refreshMutex sync.Mutex
 }
@@ -80,10 +81,10 @@ func (o *Object) UnmarshalJSON(b []byte) error {
 		return err
 	}
 
-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 	for k, v := range aux.Hashes {
 		ht, _ := strconv.Atoi(k)
-		o.cacheHashes[fs.HashType(ht)] = v
+		o.cacheHashes[hash.Type(ht)] = v
 	}
 
 	return nil
@@ -112,7 +113,7 @@ func (o *Object) updateData(source fs.Object) {
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 }
 
 // Fs returns its FS info
@@ -251,7 +252,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 	o.persist()
 
 	return nil
@@ -274,9 +275,9 @@ func (o *Object) Remove() error {
 
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ht fs.HashType) (string, error) {
+func (o *Object) Hash(ht hash.Type) (string, error) {
 	if o.cacheHashes == nil {
-		o.cacheHashes = make(map[fs.HashType]string)
+		o.cacheHashes = make(map[hash.Type]string)
 	}
 
 	cachedHash, found := o.cacheHashes[ht]
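
The object's hash cache reduces to a small memoisation pattern over `map[hash.Type]string`: check the map, fall back to the wrapped object, then store. A sketch assuming only what the hunk shows:

```go
package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
)

type cachedObject struct {
	fs.Object
	cacheHashes map[hash.Type]string
}

func (o *cachedObject) Hash(ht hash.Type) (string, error) {
	if o.cacheHashes == nil {
		o.cacheHashes = make(map[hash.Type]string)
	}
	if sum, found := o.cacheHashes[ht]; found {
		return sum, nil // served from cache
	}
	sum, err := o.Object.Hash(ht) // ask the wrapped remote
	if err != nil {
		return "", err
	}
	o.cacheHashes[ht] = sum // memoise for next time
	return sum, nil
}
```
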
diff --git a/backend/cache/plex.go b/backend/cache/plex.go
index 95ff38ac9..5b2b7f095 100644
--- a/backend/cache/plex.go
+++ b/backend/cache/plex.go
@@ -13,6 +13,7 @@ import (
 	"sync"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 )
 
 const (
@@ -107,8 +108,8 @@ func (p *plexConnector) authenticate() error {
 	}
 	p.token = token
 	if p.token != "" {
-		fs.ConfigFileSet(p.f.Name(), "plex_token", p.token)
-		fs.SaveConfig()
+		config.FileSet(p.f.Name(), "plex_token", p.token)
+		config.SaveConfig()
 		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
 	}
 
diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go
index 911454524..0ca714f0d 100644
--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -10,13 +10,16 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
 // Globals
 var (
 	// Flags
-	cryptShowMapping = fs.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
+	cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
 )
 
 // Register with Fs
@@ -71,25 +74,25 @@ func init() {
 
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, rpath string) (fs.Fs, error) {
-	mode, err := NewNameEncryptionMode(fs.ConfigFileGet(name, "filename_encryption", "standard"))
+	mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
 	if err != nil {
 		return nil, err
 	}
-	dirNameEncrypt, err := strconv.ParseBool(fs.ConfigFileGet(name, "directory_name_encryption", "true"))
+	dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
 	if err != nil {
 		return nil, err
 	}
-	password := fs.ConfigFileGet(name, "password", "")
+	password := config.FileGet(name, "password", "")
 	if password == "" {
 		return nil, errors.New("password not set in config file")
 	}
-	password, err = fs.Reveal(password)
+	password, err = config.Reveal(password)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to decrypt password")
 	}
-	salt := fs.ConfigFileGet(name, "password2", "")
+	salt := config.FileGet(name, "password2", "")
 	if salt != "" {
-		salt, err = fs.Reveal(salt)
+		salt, err = config.Reveal(salt)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to decrypt password2")
 		}
@@ -98,7 +101,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make cipher")
 	}
-	remote := fs.ConfigFileGet(name, "remote")
+	remote := config.FileGet(name, "remote")
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 	}
@@ -305,8 +308,8 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashNone)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashNone)
 }
 
 // Mkdir makes the directory (container, bucket)
@@ -459,7 +462,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 // src with it, and calcuates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash string, err error) {
+func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
 	// Read the nonce - opening the file is sufficient to read the nonce in
 	in, err := o.Open()
 	if err != nil {
@@ -499,7 +502,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash s
 	}
 
 	// pipe into hash
-	m := fs.NewMultiHasher()
+	m := hash.NewMultiHasher()
 	_, err = io.Copy(m, out)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to hash data")
@@ -558,8 +561,8 @@ func (o *Object) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(hash fs.HashType) (string, error) {
-	return "", fs.ErrHashUnsupported
+func (o *Object) Hash(ht hash.Type) (string, error) {
+	return "", hash.ErrHashUnsupported
 }
 
 // UnWrap returns the wrapped Object
@@ -652,7 +655,7 @@ func (o *ObjectInfo) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(hash fs.HashType) (string, error) {
+func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
 	return "", nil
 }
 
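
`ComputeHash` pipes the re-encrypted stream into `hash.NewMultiHasher`, which hashes every supported type as data flows through. A sketch of that step in isolation; `Sums()` returning a map keyed by `hash.Type` matches the pre-split `fs.MultiHasher` API, but treat it as an assumption here:

```go
package example

import (
	"io"

	"github.com/ncw/rclone/fs/hash"
)

// hashOf drains r through a MultiHasher and picks out one hash type.
func hashOf(r io.Reader, ht hash.Type) (string, error) {
	m := hash.NewMultiHasher() // hashes all supported types as data flows through
	if _, err := io.Copy(m, r); err != nil {
		return "", err
	}
	return m.Sums()[ht], nil // assumed: Sums() returns map[hash.Type]string
}
```
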
diff --git a/backend/crypt/crypt_config_test.go b/backend/crypt/crypt_config_test.go
index 3cbe82996..84e79dc05 100644
--- a/backend/crypt/crypt_config_test.go
+++ b/backend/crypt/crypt_config_test.go
@@ -4,7 +4,7 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/ncw/rclone/fstest/fstests"
 )
 
@@ -19,15 +19,15 @@ func init() {
 	fstests.ExtraConfig = []fstests.ExtraConfigItem{
 		{Name: name, Key: "type", Value: "crypt"},
 		{Name: name, Key: "remote", Value: tempdir},
-		{Name: name, Key: "password", Value: fs.MustObscure("potato")},
+		{Name: name, Key: "password", Value: config.MustObscure("potato")},
 		{Name: name, Key: "filename_encryption", Value: "standard"},
 		{Name: name2, Key: "type", Value: "crypt"},
 		{Name: name2, Key: "remote", Value: tempdir2},
-		{Name: name2, Key: "password", Value: fs.MustObscure("potato2")},
+		{Name: name2, Key: "password", Value: config.MustObscure("potato2")},
 		{Name: name2, Key: "filename_encryption", Value: "off"},
 		{Name: name3, Key: "type", Value: "crypt"},
 		{Name: name3, Key: "remote", Value: tempdir3},
-		{Name: name3, Key: "password", Value: fs.MustObscure("potato2")},
+		{Name: name3, Key: "password", Value: config.MustObscure("potato2")},
 		{Name: name3, Key: "filename_encryption", Value: "obfuscate"},
 	}
 	fstests.SkipBadWindowsCharacters[name3+":"] = true
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 08f7ddeb3..ef0c2876b 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -21,11 +21,15 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/pkg/errors"
-	"github.com/spf13/pflag"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/drive/v2"
@@ -46,13 +50,13 @@ const (
 // Globals
 var (
 	// Flags
-	driveAuthOwnerOnly = fs.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
-	driveUseTrash      = fs.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
-	driveSkipGdocs     = fs.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
-	driveSharedWithMe  = fs.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
-	driveTrashedOnly   = fs.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
-	driveExtensions    = fs.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
-	driveListChunk     = pflag.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
+	driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
+	driveUseTrash      = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
+	driveSkipGdocs     = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
+	driveSharedWithMe  = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
+	driveTrashedOnly   = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
+	driveExtensions    = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
+	driveListChunk     = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
 	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
 	chunkSize         = fs.SizeSuffix(8 * 1024 * 1024)
@@ -62,7 +66,7 @@ var (
 		Scopes:       []string{"https://www.googleapis.com/auth/drive"},
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,
 	}
 	mimeTypeToExtension = map[string]string{
@@ -99,7 +103,7 @@ func init() {
 		NewFs:       NewFs,
 		Config: func(name string) {
 			var err error
-			if fs.ConfigFileGet(name, "service_account_file") == "" {
+			if config.FileGet(name, "service_account_file") == "" {
 				err = oauthutil.Config("drive", name, driveConfig)
 				if err != nil {
 					log.Fatalf("Failed to configure token: %v", err)
@@ -111,18 +115,18 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Google Application Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Google Application Client Secret - leave blank normally.",
 		}, {
 			Name: "service_account_file",
 			Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
 		}},
 	})
-	fs.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
-	fs.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
+	flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
+	flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")
 
 	// Invert mimeTypeToExtension
 	extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
@@ -185,7 +189,7 @@ func (f *Fs) Features() *fs.Features {
 func shouldRetry(err error) (again bool, errOut error) {
 	again = false
 	if err != nil {
-		if fs.ShouldRetry(err) {
+		if fserrors.ShouldRetry(err) {
 			again = true
 		} else {
 			switch gerr := err.(type) {
@@ -337,13 +341,13 @@ func (f *Fs) parseExtensions(extensions string) error {
 
 // Figure out if the user wants to use a team drive
 func configTeamDrive(name string) error {
-	teamDrive := fs.ConfigFileGet(name, "team_drive")
+	teamDrive := config.FileGet(name, "team_drive")
 	if teamDrive == "" {
 		fmt.Printf("Configure this as a team drive?\n")
 	} else {
 		fmt.Printf("Change current team drive ID %q?\n", teamDrive)
 	}
-	if !fs.Confirm() {
+	if !config.Confirm() {
 		return nil
 	}
 	client, err := authenticate(name)
@@ -379,9 +383,9 @@ func configTeamDrive(name string) error {
 	if len(driveIDs) == 0 {
 		fmt.Printf("No team drives found in your account")
 	} else {
-		driveID = fs.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
+		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
 	}
-	fs.ConfigFileSet(name, "team_drive", driveID)
+	config.FileSet(name, "team_drive", driveID)
 	return nil
 }
 
@@ -399,7 +403,7 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
-	ctxWithSpecialClient := oauthutil.Context(fs.Config.Client())
+	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
 
@@ -407,7 +411,7 @@ func authenticate(name string) (*http.Client, error) {
 	var oAuthClient *http.Client
 	var err error
 
-	serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
+	serviceAccountPath := config.FileGet(name, "service_account_file")
 	if serviceAccountPath != "" {
 		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
 		if err != nil {
@@ -444,7 +448,7 @@ func NewFs(name, path string) (fs.Fs, error) {
 		root:  root,
 		pacer: newPacer(),
 	}
-	f.teamDriveID = fs.ConfigFileGet(name, "team_drive")
+	f.teamDriveID = config.FileGet(name, "team_drive")
 	f.isTeamDrive = f.teamDriveID != ""
 	f.features = (&fs.Features{
 		DuplicateFiles:          true,
@@ -1188,8 +1192,8 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // ------------------------------------------------------------
@@ -1213,9 +1217,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.md5sum, nil
 }
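
The team drive setup relies on the relocated interactive helpers `config.Confirm` and `config.Choose`. Reduced to its skeleton (the function and its arguments are illustrative):

```go
package example

import "github.com/ncw/rclone/fs/config"

// pickTeamDrive is the interactive skeleton of configTeamDrive above.
func pickTeamDrive(ids, names []string) string {
	if !config.Confirm() { // y/n prompt on stdin
		return ""
	}
	// Final argument per the hunk's call; presumably it allows free-form entry.
	return config.Choose("Enter a Team Drive ID", ids, names, true)
}
```
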
diff --git a/backend/drive/upload.go b/backend/drive/upload.go
index 46d75c043..e6eb04d74 100644
--- a/backend/drive/upload.go
+++ b/backend/drive/upload.go
@@ -20,6 +20,8 @@ import (
 	"strconv"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"google.golang.org/api/drive/v2"
 	"google.golang.org/api/googleapi"
@@ -201,7 +203,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
 		if reqSize >= int64(chunkSize) {
 			reqSize = int64(chunkSize)
 		}
-		chunk := fs.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
+		chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 
 		// Transfer the chunk
 		err = rx.f.pacer.Call(func() (bool, error) {
@@ -241,7 +243,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
 	// Handle 404 Not Found errors when doing resumable uploads by starting
 	// the entire upload over from the beginning.
 	if rx.ret == nil {
-		return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
+		return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
 	}
 	return rx.ret, nil
 }
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index ab4623b27..ba5f5ae8d 100644
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -34,8 +34,13 @@ import (
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 )
@@ -59,7 +64,7 @@ var (
 		// },
 		Endpoint:     dropbox.OAuthEndpoint(""),
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 	// A regexp matching path names for files Dropbox ignores
@@ -112,7 +117,7 @@ func init() {
 			Help: "Dropbox App Secret - leave blank normally.",
 		}},
 	})
-	fs.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
+	flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
 }
 
 // Fs represents a remote dropbox server
@@ -170,7 +175,7 @@ func shouldRetry(err error) (bool, error) {
 	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
 		return true, err
 	}
-	return fs.ShouldRetry(err), err
+	return fserrors.ShouldRetry(err), err
 }
 
 // NewFs constructs an Fs from the path, container:path
@@ -181,11 +186,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 
 	// Convert the old token if it exists.  The old token was just
 	// a string, the new one is a JSON blob
-	oldToken := strings.TrimSpace(fs.ConfigFileGet(name, fs.ConfigToken))
+	oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
 	if oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
 		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
-		err := fs.ConfigSetValueAndSave(name, fs.ConfigToken, newToken)
+		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, errors.Wrap(err, "NewFS convert token")
 		}
@@ -675,8 +680,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashDropbox)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashDropbox)
 }
 
 // ------------------------------------------------------------
@@ -700,9 +705,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the dropbox special hash
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashDropbox {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashDropbox {
+		return "", hash.ErrHashUnsupported
 	}
 	err := o.readMetaData()
 	if err != nil {
@@ -813,7 +818,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	case files.DownloadAPIError:
 		// Don't attempt to retry copyright violation errors
 		if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
-			return nil, fs.NoRetryError(err)
+			return nil, fserrors.NoRetryError(err)
 		}
 	}
 
@@ -831,7 +836,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 	if size != -1 {
 		chunks = int(size/chunkSize) + 1
 	}
-	in := fs.NewCountingReader(in0)
+	in := readers.NewCountingReader(in0)
 	buf := make([]byte, int(chunkSize))
 
 	fmtChunk := func(cur int, last bool) {
@@ -847,7 +852,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 	// write the first chunk
 	fmtChunk(1, false)
 	var res *files.UploadSessionStartResult
-	chunk := fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		// seek to the start in case this is a retry
 		if _, err = chunk.Seek(0, 0); err != nil {
@@ -883,7 +888,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 		}
 		cursor.Offset = in.BytesRead()
 		fmtChunk(currentChunk, false)
-		chunk = fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+		chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
 		err = o.fs.pacer.Call(func() (bool, error) {
 			// seek to the start in case this is a retry
 			if _, err = chunk.Seek(0, 0); err != nil {
@@ -906,7 +911,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 		Commit: commitInfo,
 	}
 	fmtChunk(currentChunk, true)
-	chunk = fs.NewRepeatableReaderBuffer(in, buf)
+	chunk = readers.NewRepeatableReaderBuffer(in, buf)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		// seek to the start in case this is a retry
 		if _, err = chunk.Seek(0, 0); err != nil {
diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go
index b3b304265..931dcf2af 100644
--- a/backend/ftp/ftp.go
+++ b/backend/ftp/ftp.go
@@ -13,6 +13,8 @@ import (
 
 	"github.com/jlaffaye/ftp"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
@@ -160,33 +162,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 func NewFs(name, root string) (ff fs.Fs, err error) {
 	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 	// FIXME Convert the old scheme used for the first beta - remove after release
-	if ftpURL := fs.ConfigFileGet(name, "url"); ftpURL != "" {
+	if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
 		fs.Infof(name, "Converting old configuration")
 		u, err := url.Parse(ftpURL)
 		if err != nil {
 			return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
 		}
 		parts := strings.Split(u.Host, ":")
-		fs.ConfigFileSet(name, "host", parts[0])
+		config.FileSet(name, "host", parts[0])
 		if len(parts) > 1 {
-			fs.ConfigFileSet(name, "port", parts[1])
+			config.FileSet(name, "port", parts[1])
 		}
-		fs.ConfigFileSet(name, "host", u.Host)
-		fs.ConfigFileSet(name, "user", fs.ConfigFileGet(name, "username"))
-		fs.ConfigFileSet(name, "pass", fs.ConfigFileGet(name, "password"))
-		fs.ConfigFileDeleteKey(name, "username")
-		fs.ConfigFileDeleteKey(name, "password")
-		fs.ConfigFileDeleteKey(name, "url")
-		fs.SaveConfig()
+		config.FileSet(name, "host", u.Host)
+		config.FileSet(name, "user", config.FileGet(name, "username"))
+		config.FileSet(name, "pass", config.FileGet(name, "password"))
+		config.FileDeleteKey(name, "username")
+		config.FileDeleteKey(name, "password")
+		config.FileDeleteKey(name, "url")
+		config.SaveConfig()
 		if u.Path != "" && u.Path != "/" {
 			fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
 		}
 	}
-	host := fs.ConfigFileGet(name, "host")
-	user := fs.ConfigFileGet(name, "user")
-	pass := fs.ConfigFileGet(name, "pass")
-	port := fs.ConfigFileGet(name, "port")
-	pass, err = fs.Reveal(pass)
+	host := config.FileGet(name, "host")
+	user := config.FileGet(name, "user")
+	pass := config.FileGet(name, "pass")
+	port := config.FileGet(name, "port")
+	pass, err = config.Reveal(pass)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFS decrypt password")
 	}
@@ -346,7 +348,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 }
 
 // Hashes are not supported
-func (f *Fs) Hashes() fs.HashSet {
+func (f *Fs) Hashes() hash.Set {
 	return 0
 }
 
@@ -565,8 +567,8 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	return "", hash.ErrHashUnsupported
 }
 
 // Size returns the size of an object in bytes
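
The ftp conversion above leans on the relocated `fs/config` helpers. For reference, a short sketch of reading a remote's settings and revealing an obscured password; the remote name and keys are illustrative:

    package example

    import "github.com/ncw/rclone/fs/config"

    // readFtpConfig fetches connection settings from the config file and
    // decrypts the obscured password.
    func readFtpConfig(name string) (host, user, pass string, err error) {
    	host = config.FileGet(name, "host")
    	user = config.FileGet(name, "user")
    	pass, err = config.Reveal(config.FileGet(name, "pass"))
    	return host, user, pass, err
    }
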
diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go
index 57a0a0bef..72faa7e9f 100644
--- a/backend/googlecloudstorage/googlecloudstorage.go
+++ b/backend/googlecloudstorage/googlecloudstorage.go
@@ -28,6 +28,11 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
@@ -46,14 +51,14 @@ const (
 )
 
 var (
-	gcsLocation     = fs.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
-	gcsStorageClass = fs.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
+	gcsLocation     = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
+	gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
 	// Description of how to auth for this app
 	storageConfig = &oauth2.Config{
 		Scopes:       []string{storage.DevstorageFullControlScope},
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,
 	}
 )
@@ -65,7 +70,7 @@ func init() {
 		Description: "Google Cloud Storage (this is not Google Drive)",
 		NewFs:       NewFs,
 		Config: func(name string) {
-			if fs.ConfigFileGet(name, "service_account_file") != "" {
+			if config.FileGet(name, "service_account_file") != "" {
 				return
 			}
 			err := oauthutil.Config("google cloud storage", name, storageConfig)
@@ -74,10 +79,10 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Google Application Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Google Application Client Secret - leave blank normally.",
 		}, {
 			Name: "project_number",
@@ -280,7 +285,7 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
-	ctxWithSpecialClient := oauthutil.Context(fs.Config.Client())
+	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
 
@@ -289,7 +294,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	var oAuthClient *http.Client
 	var err error
 
-	serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
+	serviceAccountPath := config.FileGet(name, "service_account_file")
 	if serviceAccountPath != "" {
 		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
 		if err != nil {
@@ -311,11 +316,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		name:          name,
 		bucket:        bucket,
 		root:          directory,
-		projectNumber: fs.ConfigFileGet(name, "project_number"),
-		objectACL:     fs.ConfigFileGet(name, "object_acl"),
-		bucketACL:     fs.ConfigFileGet(name, "bucket_acl"),
-		location:      fs.ConfigFileGet(name, "location"),
-		storageClass:  fs.ConfigFileGet(name, "storage_class"),
+		projectNumber: config.FileGet(name, "project_number"),
+		objectACL:     config.FileGet(name, "object_acl"),
+		bucketACL:     config.FileGet(name, "bucket_acl"),
+		location:      config.FileGet(name, "location"),
+		storageClass:  config.FileGet(name, "storage_class"),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
@@ -538,7 +543,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.bucket == "" {
 		return fs.ErrorListBucketRequired
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
@@ -669,8 +674,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // ------------------------------------------------------------
@@ -694,9 +699,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.md5sum, nil
 }
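
`ListR` now builds on the helper relocated to `fs/walk`. A minimal sketch of the `Add`/`Flush` shape it follows elsewhere in rclone - note that `Flush` does not appear in this patch and is assumed here:

    package example

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/walk"
    )

    // listAll feeds entries through the helper, which batches them to the
    // callback; Flush sends any remainder.
    func listAll(entries []fs.DirEntry, callback fs.ListRCallback) error {
    	list := walk.NewListRHelper(callback)
    	for _, entry := range entries {
    		if err := list.Add(entry); err != nil {
    			return err
    		}
    	}
    	return list.Flush()
    }
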
diff --git a/backend/http/http.go b/backend/http/http.go
index 5d2c888fb..dcce744a5 100644
--- a/backend/http/http.go
+++ b/backend/http/http.go
@@ -17,6 +17,9 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
 	"golang.org/x/net/html"
@@ -79,7 +82,7 @@ func statusError(res *http.Response, err error) error {
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(name, root string) (fs.Fs, error) {
-	endpoint := fs.ConfigFileGet(name, "url")
+	endpoint := config.FileGet(name, "url")
 	if !strings.HasSuffix(endpoint, "/") {
 		endpoint += "/"
 	}
@@ -94,7 +97,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		return nil, err
 	}
 
-	client := fs.Config.Client()
+	client := fshttp.NewClient(fs.Config)
 
 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -363,8 +366,8 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
-func (o *Object) Hash(r fs.HashType) (string, error) {
-	return "", fs.ErrHashUnsupported
+func (o *Object) Hash(r hash.Type) (string, error) {
+	return "", hash.ErrHashUnsupported
 }
 
 // Size returns the size in bytes of the remote http file
@@ -434,9 +437,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	return res.Body, nil
 }
 
-// Hashes returns fs.HashNone to indicate remote hashing is unavailable
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashNone)
+// Hashes returns hash.HashNone to indicate remote hashing is unavailable
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashNone)
 }
 
 // Mkdir makes the root directory of the Fs object
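
Every `fs.Config.Client()` call in this patch becomes `fshttp.NewClient(fs.Config)`. A sketch of the common pairing with `lib/rest`, as the webdav backend below does; the endpoint parameter is illustrative:

    package example

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/fshttp"
    	"github.com/ncw/rclone/lib/rest"
    )

    // newRestClient builds a REST client on an http.Client that honours
    // rclone's global timeout and transport settings.
    func newRestClient(endpoint string) *rest.Client {
    	return rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(endpoint)
    }
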
diff --git a/backend/http/http_internal_test.go b/backend/http/http_internal_test.go
index 72f014127..0cfe325de 100644
--- a/backend/http/http_internal_test.go
+++ b/backend/http/http_internal_test.go
@@ -15,6 +15,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/ncw/rclone/fstest"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/stretchr/testify/assert"
@@ -36,12 +37,12 @@ func prepareServer(t *testing.T) func() {
 	ts := httptest.NewServer(fileServer)
 
 	// Configure the remote
-	fs.LoadConfig()
+	config.LoadConfig()
 	// fs.Config.LogLevel = fs.LogLevelDebug
 	// fs.Config.DumpHeaders = true
 	// fs.Config.DumpBodies = true
-	fs.ConfigFileSet(remoteName, "type", "http")
-	fs.ConfigFileSet(remoteName, "url", ts.URL)
+	config.FileSet(remoteName, "type", "http")
+	config.FileSet(remoteName, "url", ts.URL)
 
 	// return a function to tidy up
 	return ts.Close
diff --git a/backend/hubic/hubic.go b/backend/hubic/hubic.go
index 110f2d563..4e0ca3051 100644
--- a/backend/hubic/hubic.go
+++ b/backend/hubic/hubic.go
@@ -15,9 +15,9 @@ import (
 
 	"github.com/ncw/rclone/backend/swift"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/lib/oauthutil"
-	"github.com/ncw/rclone/oauthutil"
-	"github.com/ncw/rclone/swift"
 	swiftLib "github.com/ncw/swift"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
@@ -40,7 +40,7 @@ var (
 			TokenURL: "https://api.hubic.com/oauth/token/",
 		},
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 )
@@ -58,10 +58,10 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Hubic Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Hubic Client Secret - leave blank normally.",
 		}},
 	})
@@ -159,7 +159,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		Auth:           newAuth(f),
 		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
 		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fs.Config.Transport(),
+		Transport:      fshttp.NewTransport(fs.Config),
 	}
 	err = c.Authenticate()
 	if err != nil {
diff --git a/backend/local/local.go b/backend/local/local.go
index 667de902b..d509ce732 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -16,14 +16,17 @@ import (
 	"unicode/utf8"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 	"google.golang.org/appengine/log"
 )
 
 var (
-	followSymlinks = fs.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
-	skipSymlinks   = fs.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
-	noUTFNorm      = fs.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
+	followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
+	skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
+	noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
 )
 
 // Constants
@@ -72,7 +75,7 @@ type Object struct {
 	size    int64  // file metadata - always present
 	mode    os.FileMode
 	modTime time.Time
-	hashes  map[fs.HashType]string // Hashes
+	hashes  map[hash.Type]string // Hashes
 }
 
 // ------------------------------------------------------------
@@ -85,7 +88,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
 	}
 
-	nounc := fs.ConfigFileGet(name, "nounc")
+	nounc := config.FileGet(name, "nounc")
 	f := &Fs{
 		name:     name,
 		warned:   make(map[string]struct{}),
@@ -532,8 +535,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.SupportedHashes
+func (f *Fs) Hashes() hash.Set {
+	return hash.SupportedHashes
 }
 
 // ------------------------------------------------------------
@@ -557,7 +560,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the requested hash of a file as a lowercase hex string
-func (o *Object) Hash(r fs.HashType) (string, error) {
+func (o *Object) Hash(r hash.Type) (string, error) {
 	// Check that the underlying file hasn't changed
 	oldtime := o.modTime
 	oldsize := o.size
@@ -571,12 +574,12 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
 	}
 
 	if o.hashes == nil {
-		o.hashes = make(map[fs.HashType]string)
+		o.hashes = make(map[hash.Type]string)
 		in, err := os.Open(o.path)
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to open")
 		}
-		o.hashes, err = fs.HashStream(in)
+		o.hashes, err = hash.Stream(in)
 		closeErr := in.Close()
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to read")
@@ -641,9 +644,9 @@ func (o *Object) Storable() bool {
 // localOpenFile wraps an io.ReadCloser and updates the hashes of the
 // object that is read
 type localOpenFile struct {
-	o    *Object         // object that is open
-	in   io.ReadCloser   // handle we are wrapping
-	hash *fs.MultiHasher // currently accumulating hashes
+	o    *Object           // object that is open
+	in   io.ReadCloser     // handle we are wrapping
+	hash *hash.MultiHasher // currently accumulating hashes
 }
 
 // Read bytes from the object - see io.Reader
@@ -670,7 +673,7 @@ func (file *localOpenFile) Close() (err error) {
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset int64
-	hashes := fs.SupportedHashes
+	hashes := hash.SupportedHashes
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -694,7 +697,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		// don't attempt to make checksums
 		return fd, err
 	}
-	hash, err := fs.NewMultiHasherTypes(hashes)
+	hash, err := hash.NewMultiHasherTypes(hashes)
 	if err != nil {
 		return nil, err
 	}
@@ -715,7 +718,7 @@ func (o *Object) mkdirAll() error {
 
 // Update the object from in with modTime and size
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	hashes := fs.SupportedHashes
+	hashes := hash.SupportedHashes
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.HashesOption:
@@ -734,7 +737,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}
 
 	// Calculate the hash of the object we are reading as we go along
-	hash, err := fs.NewMultiHasherTypes(hashes)
+	hash, err := hash.NewMultiHasherTypes(hashes)
 	if err != nil {
 		return err
 	}
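
A sketch of the `fs/hash` API the local backend uses above, hashing a stream with every supported hash type in one pass; only the file path parameter is illustrative, and the close error is ignored for brevity:

    package example

    import (
    	"os"

    	"github.com/ncw/rclone/fs/hash"
    )

    // hashFile returns all supported hashes of a file, keyed by hash.Type.
    func hashFile(path string) (map[hash.Type]string, error) {
    	in, err := os.Open(path)
    	if err != nil {
    		return nil, err
    	}
    	defer func() { _ = in.Close() }()
    	return hash.Stream(in)
    }
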
diff --git a/backend/local/read_device_unix.go b/backend/local/read_device_unix.go
index 05b15d537..a8c34e52d 100644
--- a/backend/local/read_device_unix.go
+++ b/backend/local/read_device_unix.go
@@ -9,10 +9,11 @@ import (
 	"syscall"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
 )
 
 var (
-	oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
+	oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
 )
 
 // readDevice turns a valid os.FileInfo into a device number,
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 336fa61d3..1acbebe78 100644
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -15,16 +15,16 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/backend/onedrive/api"
-	"github.com/ncw/rclone/dircache"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/ncw/rclone/lib/rest"
-	"github.com/ncw/rclone/oauthutil"
-	"github.com/ncw/rclone/onedrive/api"
-	"github.com/ncw/rclone/pacer"
-	"github.com/ncw/rclone/rest"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 )
@@ -56,7 +56,7 @@ var (
 			TokenURL: "https://login.live.com/oauth20_token.srf",
 		},
 		ClientID:     rclonePersonalClientID,
-		ClientSecret: fs.MustReveal(rclonePersonalEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rclonePersonalEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 
@@ -67,7 +67,7 @@ var (
 			TokenURL: "https://login.microsoftonline.com/common/oauth2/token",
 		},
 		ClientID:     rcloneBusinessClientID,
-		ClientSecret: fs.MustReveal(rcloneBusinessEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneBusinessEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 	oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
@@ -87,7 +87,7 @@ func init() {
 			fmt.Printf("Choose OneDrive account type?\n")
 			fmt.Printf(" * Say b for a OneDrive business account\n")
 			fmt.Printf(" * Say p for a personal OneDrive account\n")
-			isPersonal := fs.Command([]string{"bBusiness", "pPersonal"}) == 'p'
+			isPersonal := config.Command([]string{"bBusiness", "pPersonal"}) == 'p'
 
 			if isPersonal {
 			// for personal accounts we don't save a field about the account
@@ -103,7 +103,7 @@ func init() {
 				}
 
 				// Are we running headless?
-				if fs.ConfigFileGet(name, fs.ConfigAutomatic) != "" {
+				if config.FileGet(name, config.ConfigAutomatic) != "" {
 					// Yes, okay we are done
 					return
 				}
@@ -159,10 +159,10 @@ func init() {
 				} else if len(resourcesID) == 1 {
 					foundService = resourcesID[0]
 				} else {
-					foundService = fs.Choose("Choose resource URL", resourcesID, resourcesURL, false)
+					foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
 				}
 
-				fs.ConfigFileSet(name, configResourceURL, foundService)
+				config.FileSet(name, configResourceURL, foundService)
 				oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
 
 				// get the token from the initial config
@@ -218,16 +218,16 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Microsoft App Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Microsoft App Client Secret - leave blank normally.",
 		}},
 	})
 
-	fs.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
-	fs.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
+	flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
+	flags.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
 }
 
 // Fs represents a remote one drive
@@ -306,7 +306,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 		authRety = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
-	return authRety || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // readMetaDataForPath reads the metadata from the path
@@ -339,7 +339,7 @@ func errorHandler(resp *http.Response) error {
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 	// get the resource URL from the config file
-	resourceURL := fs.ConfigFileGet(name, configResourceURL, "")
+	resourceURL := config.FileGet(name, configResourceURL, "")
 	// if we have a resource URL it's a business account otherwise a personal one
 	var rootURL string
 	var oauthConfig *oauth2.Config
@@ -743,10 +743,10 @@ func (f *Fs) waitForJob(location string, o *Object) error {
 		err = f.pacer.Call(func() (bool, error) {
 			resp, err = f.srv.Call(&opts)
 			if err != nil {
-				return fs.ShouldRetry(err), err
+				return fserrors.ShouldRetry(err), err
 			}
 			body, err = rest.ReadBody(resp)
-			return fs.ShouldRetry(err), err
+			return fserrors.ShouldRetry(err), err
 		})
 		if err != nil {
 			return err
@@ -915,8 +915,8 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashSHA1)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashSHA1)
 }
 
 // ------------------------------------------------------------
@@ -945,9 +945,9 @@ func (o *Object) srvPath() string {
 }
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashSHA1 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashSHA1 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.sha1, nil
 }
@@ -1161,7 +1161,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
 		if remaining < n {
 			n = remaining
 		}
-		seg := fs.NewRepeatableReader(io.LimitReader(in, n))
+		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
 		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
 		err = o.uploadFragment(uploadURL, position, size, seg, n)
 		if err != nil {
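
With `fs.ShouldRetry` moved to `fserrors`, the per-backend `shouldRetry` helpers in this patch share one shape. A sketch with an illustrative list of retryable status codes:

    package example

    import (
    	"net/http"

    	"github.com/ncw/rclone/fs/fserrors"
    )

    // retryErrorCodes is illustrative - each backend defines its own list.
    var retryErrorCodes = []int{429, 500, 502, 503, 504}

    // shouldRetry reports whether the response or error deserves a retry,
    // returning the error as a convenience.
    func shouldRetry(resp *http.Response, err error) (bool, error) {
    	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
    }
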
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index 636821fb8..d30980763 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -22,16 +22,15 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/backend/pcloud/api"
-	"github.com/ncw/rclone/dircache"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/rclone/lib/rest"
-	"github.com/ncw/rclone/oauthutil"
-	"github.com/ncw/rclone/pacer"
-	"github.com/ncw/rclone/pcloud/api"
-	"github.com/ncw/rclone/rest"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 )
@@ -56,7 +55,7 @@ var (
 			TokenURL: "https://api.pcloud.com/oauth2_token",
 		},
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 	uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
@@ -75,14 +74,14 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Pcloud App Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Pcloud App Client Secret - leave blank normally.",
 		}},
 	})
-	fs.VarP(&uploadCutoff, "pcloud-upload-cutoff", "", "Cutoff for switching to multipart upload")
+	flags.VarP(&uploadCutoff, "pcloud-upload-cutoff", "", "Cutoff for switching to multipart upload")
 }
 
 // Fs represents a remote pcloud
@@ -174,7 +173,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 		doRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
-	return doRetry || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // substitute reserved characters for pcloud
@@ -812,8 +811,8 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5 | fs.HashSHA1)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5 | hash.HashSHA1)
 }
 
 // ------------------------------------------------------------
@@ -859,9 +858,9 @@ func (o *Object) getHashes() (err error) {
 }
 
 // Hash returns the MD5 or SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 && t != fs.HashSHA1 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 && t != hash.HashSHA1 {
+		return "", hash.ErrHashUnsupported
 	}
 	if o.md5 == "" && o.sha1 == "" {
 		err := o.getHashes()
@@ -869,7 +868,7 @@ func (o *Object) Hash(t fs.HashType) (string, error) {
 			return "", errors.Wrap(err, "failed to get hash")
 		}
 	}
-	if t == fs.HashMD5 {
+	if t == hash.HashMD5 {
 		return o.md5, nil
 	}
 	return o.sha1, nil
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index f29dd49cf..696a658d7 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -17,8 +17,12 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
-	"github.com/yunify/qingstor-sdk-go/config"
+	qsConfig "github.com/yunify/qingstor-sdk-go/config"
 	qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
 	qs "github.com/yunify/qingstor-sdk-go/service"
 )
@@ -162,11 +166,11 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 
 // qsConnection makes a connection to qingstor
 func qsServiceConnection(name string) (*qs.Service, error) {
-	accessKeyID := fs.ConfigFileGet(name, "access_key_id")
-	secretAccessKey := fs.ConfigFileGet(name, "secret_access_key")
+	accessKeyID := config.FileGet(name, "access_key_id")
+	secretAccessKey := config.FileGet(name, "secret_access_key")
 
 	switch {
-	case fs.ConfigFileGetBool(name, "env_auth", false):
+	case config.FileGetBool(name, "env_auth", false):
 		// No need for empty checks if "env_auth" is true
 	case accessKeyID == "" && secretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -180,7 +184,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 	host := "qingstor.com"
 	port := 443
 
-	endpoint := fs.ConfigFileGet(name, "endpoint", "")
+	endpoint := config.FileGet(name, "endpoint", "")
 	if endpoint != "" {
 		_protocol, _host, _port, err := qsParseEndpoint(endpoint)
 
@@ -201,19 +205,19 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 	}
 
 	connectionRetries := 3
-	retries := fs.ConfigFileGet(name, "connection_retries", "")
+	retries := config.FileGet(name, "connection_retries", "")
 	if retries != "" {
 		connectionRetries, _ = strconv.Atoi(retries)
 	}
 
-	cf, err := config.NewDefault()
+	cf, err := qsConfig.NewDefault()
 	cf.AccessKeyID = accessKeyID
 	cf.SecretAccessKey = secretAccessKey
 	cf.Protocol = protocol
 	cf.Host = host
 	cf.Port = port
 	cf.ConnectionRetries = connectionRetries
-	cf.Connection = fs.Config.Client()
+	cf.Connection = fshttp.NewClient(fs.Config)
 
 	svc, _ := qs.Init(cf)
 
@@ -231,7 +235,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		return nil, err
 	}
 
-	zone := fs.ConfigFileGet(name, "zone")
+	zone := config.FileGet(name, "zone")
 	if zone == "" {
 		zone = "pek3a"
 	}
@@ -302,9 +306,9 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
-	//return fs.HashSet(fs.HashNone)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
+	//return hash.Set(hash.HashNone)
 }
 
 // Features returns the optional features of this Fs
@@ -591,7 +595,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.bucket == "" {
 		return fs.ErrorListBucketRequired
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.list(dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
@@ -925,9 +929,9 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	etag := strings.Trim(strings.ToLower(o.etag), `"`)
 	// Check the etag is a valid md5sum
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 72ff06cc8..2a084ef9a 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -37,6 +37,11 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
@@ -233,8 +238,8 @@ const (
 // Globals
 var (
 	// Flags
-	s3ACL          = fs.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
-	s3StorageClass = fs.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
+	s3ACL          = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
+	s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
 )
 
 // Fs represents a remote s3 server
@@ -316,9 +321,9 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
 func s3Connection(name string) (*s3.S3, *session.Session, error) {
 	// Make the auth
 	v := credentials.Value{
-		AccessKeyID:     fs.ConfigFileGet(name, "access_key_id"),
-		SecretAccessKey: fs.ConfigFileGet(name, "secret_access_key"),
-		SessionToken:    fs.ConfigFileGet(name, "session_token"),
+		AccessKeyID:     config.FileGet(name, "access_key_id"),
+		SecretAccessKey: config.FileGet(name, "secret_access_key"),
+		SessionToken:    config.FileGet(name, "session_token"),
 	}
 
 	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
@@ -348,7 +353,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 	cred := credentials.NewChainCredentials(providers)
 
 	switch {
-	case fs.ConfigFileGetBool(name, "env_auth", false):
+	case config.FileGetBool(name, "env_auth", false):
 		// No need for empty checks if "env_auth" is true
 	case v.AccessKeyID == "" && v.SecretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -359,8 +364,8 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 		return nil, nil, errors.New("secret_access_key not found")
 	}
 
-	endpoint := fs.ConfigFileGet(name, "endpoint")
-	region := fs.ConfigFileGet(name, "region")
+	endpoint := config.FileGet(name, "endpoint")
+	region := config.FileGet(name, "region")
 	if region == "" && endpoint == "" {
 		endpoint = "https://s3.amazonaws.com/"
 	}
@@ -372,7 +377,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 		WithMaxRetries(maxRetries).
 		WithCredentials(cred).
 		WithEndpoint(endpoint).
-		WithHTTPClient(fs.Config.Client()).
+		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(true)
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	ses := session.New()
@@ -408,11 +413,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		c:                  c,
 		bucket:             bucket,
 		ses:                ses,
-		acl:                fs.ConfigFileGet(name, "acl"),
+		acl:                config.FileGet(name, "acl"),
 		root:               directory,
-		locationConstraint: fs.ConfigFileGet(name, "location_constraint"),
-		sse:                fs.ConfigFileGet(name, "server_side_encryption"),
-		storageClass:       fs.ConfigFileGet(name, "storage_class"),
+		locationConstraint: config.FileGet(name, "location_constraint"),
+		sse:                config.FileGet(name, "server_side_encryption"),
+		storageClass:       config.FileGet(name, "storage_class"),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
@@ -657,7 +662,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.bucket == "" {
 		return fs.ErrorListBucketRequired
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
@@ -804,8 +809,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // ------------------------------------------------------------
@@ -831,9 +836,9 @@ func (o *Object) Remote() string {
 var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	hash := strings.Trim(strings.ToLower(o.etag), `"`)
 	// Check the etag is a valid md5sum
@@ -1027,7 +1032,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}
 
 	if size > uploader.PartSize {
-		hash, err := src.Hash(fs.HashMD5)
+		hash, err := src.Hash(hash.HashMD5)
 
 		if err == nil && matchMd5.MatchString(hash) {
 			hashBytes, err := hex.DecodeString(hash)
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index dffc35a91..9460ca056 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -16,6 +16,9 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 	"github.com/pkg/sftp"
 	"github.com/xanzy/ssh-agent"
@@ -94,7 +97,7 @@ type Fs struct {
 	port         string
 	url          string
 	mkdirLock    *stringLock
-	cachedHashes *fs.HashSet
+	cachedHashes *hash.Set
 	poolMu       sync.Mutex
 	pool         []*conn
 	connLimit    *rate.Limiter // for limiting number of connections per second
@@ -134,13 +137,13 @@ func readCurrentUser() (userName string) {
 // Dial starts a client connection to the given SSH server. It is a
 // convenience function that connects to the given network address,
 // initiates the SSH handshake, and then sets up a Client.
-func Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
-	dialer := fs.Config.NewDialer()
+func Dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
+	dialer := fshttp.NewDialer(fs.Config)
 	conn, err := dialer.Dial(network, addr)
 	if err != nil {
 		return nil, err
 	}
-	c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
+	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -263,19 +266,19 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(name, root string) (fs.Fs, error) {
-	user := fs.ConfigFileGet(name, "user")
-	host := fs.ConfigFileGet(name, "host")
-	port := fs.ConfigFileGet(name, "port")
-	pass := fs.ConfigFileGet(name, "pass")
-	keyFile := fs.ConfigFileGet(name, "key_file")
-	insecureCipher := fs.ConfigFileGetBool(name, "use_insecure_cipher")
+	user := config.FileGet(name, "user")
+	host := config.FileGet(name, "host")
+	port := config.FileGet(name, "port")
+	pass := config.FileGet(name, "pass")
+	keyFile := config.FileGet(name, "key_file")
+	insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
 	if user == "" {
 		user = currentUser
 	}
 	if port == "" {
 		port = "22"
 	}
-	config := &ssh.ClientConfig{
+	sshConfig := &ssh.ClientConfig{
 		User:            user,
 		Auth:            []ssh.AuthMethod{},
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
@@ -283,8 +286,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}
 
 	if insecureCipher {
-		config.Config.SetDefaults()
-		config.Config.Ciphers = append(config.Config.Ciphers, "aes128-cbc")
+		sshConfig.Config.SetDefaults()
+		sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
 	}
 
 	// Add ssh agent-auth if no password or file specified
@@ -297,7 +300,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't read ssh agent signers")
 		}
-		config.Auth = append(config.Auth, ssh.PublicKeys(signers...))
+		sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
 	}
 
 	// Load key file if specified
@@ -310,22 +313,22 @@ func NewFs(name, root string) (fs.Fs, error) {
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to parse private key file")
 		}
-		config.Auth = append(config.Auth, ssh.PublicKeys(signer))
+		sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
 	}
 
 	// Auth from password if specified
 	if pass != "" {
-		clearpass, err := fs.Reveal(pass)
+		clearpass, err := config.Reveal(pass)
 		if err != nil {
 			return nil, err
 		}
-		config.Auth = append(config.Auth, ssh.Password(clearpass))
+		sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
 	}
 
 	f := &Fs{
 		name:      name,
 		root:      root,
-		config:    config,
+		config:    sshConfig,
 		host:      host,
 		port:      port,
 		url:       "sftp://" + user + "@" + host + ":" + port + "/" + root,
@@ -631,25 +634,25 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // Hashes returns the supported hash types of the filesystem
-func (f *Fs) Hashes() fs.HashSet {
+func (f *Fs) Hashes() hash.Set {
 	if f.cachedHashes != nil {
 		return *f.cachedHashes
 	}
 
-	hashcheckDisabled := fs.ConfigFileGetBool(f.name, "disable_hashcheck")
+	hashcheckDisabled := config.FileGetBool(f.name, "disable_hashcheck")
 	if hashcheckDisabled {
-		return fs.HashSet(fs.HashNone)
+		return hash.Set(hash.HashNone)
 	}
 
 	c, err := f.getSftpConnection()
 	if err != nil {
 		fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
-		return fs.HashSet(fs.HashNone)
+		return hash.Set(hash.HashNone)
 	}
 	defer f.putSftpConnection(&c, err)
 	session, err := c.sshClient.NewSession()
 	if err != nil {
-		return fs.HashSet(fs.HashNone)
+		return hash.Set(hash.HashNone)
 	}
 	sha1Output, _ := session.Output("echo 'abc' | sha1sum")
 	expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
@@ -657,7 +660,7 @@ func (f *Fs) Hashes() fs.HashSet {
 
 	session, err = c.sshClient.NewSession()
 	if err != nil {
-		return fs.HashSet(fs.HashNone)
+		return hash.Set(hash.HashNone)
 	}
 	md5Output, _ := session.Output("echo 'abc' | md5sum")
 	expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
@@ -666,15 +669,15 @@ func (f *Fs) Hashes() fs.HashSet {
 	sha1Works := parseHash(sha1Output) == expectedSha1
 	md5Works := parseHash(md5Output) == expectedMd5
 
-	set := fs.NewHashSet()
+	set := hash.NewHashSet()
 	if !sha1Works && !md5Works {
-		set.Add(fs.HashNone)
+		set.Add(hash.HashNone)
 	}
 	if sha1Works {
-		set.Add(fs.HashSHA1)
+		set.Add(hash.HashSHA1)
 	}
 	if md5Works {
-		set.Add(fs.HashMD5)
+		set.Add(hash.HashMD5)
 	}
 
 	_ = session.Close()
@@ -702,10 +705,10 @@ func (o *Object) Remote() string {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(r fs.HashType) (string, error) {
-	if r == fs.HashMD5 && o.md5sum != nil {
+func (o *Object) Hash(r hash.Type) (string, error) {
+	if r == hash.HashMD5 && o.md5sum != nil {
 		return *o.md5sum, nil
-	} else if r == fs.HashSHA1 && o.sha1sum != nil {
+	} else if r == hash.HashSHA1 && o.sha1sum != nil {
 		return *o.sha1sum, nil
 	}
 
@@ -717,29 +720,29 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
 	o.fs.putSftpConnection(&c, err)
 	if err != nil {
 		o.fs.cachedHashes = nil // Something has changed on the remote system
-		return "", fs.ErrHashUnsupported
+		return "", hash.ErrHashUnsupported
 	}
 
-	err = fs.ErrHashUnsupported
+	err = hash.ErrHashUnsupported
 	var outputBytes []byte
 	escapedPath := shellEscape(o.path())
-	if r == fs.HashMD5 {
+	if r == hash.HashMD5 {
 		outputBytes, err = session.Output("md5sum " + escapedPath)
-	} else if r == fs.HashSHA1 {
+	} else if r == hash.HashSHA1 {
 		outputBytes, err = session.Output("sha1sum " + escapedPath)
 	}
 
 	if err != nil {
 		o.fs.cachedHashes = nil // Something has changed on the remote system
 		_ = session.Close()
-		return "", fs.ErrHashUnsupported
+		return "", hash.ErrHashUnsupported
 	}
 
 	_ = session.Close()
 	str := parseHash(outputBytes)
-	if r == fs.HashMD5 {
+	if r == hash.HashMD5 {
 		o.md5sum = &str
-	} else if r == fs.HashSHA1 {
+	} else if r == hash.HashSHA1 {
 		o.sha1sum = &str
 	}
 	return str, nil
@@ -812,7 +815,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 	if err != nil {
 		return errors.Wrap(err, "SetModTime")
 	}
-	if fs.ConfigFileGetBool(o.fs.name, "set_modtime", true) {
+	if config.FileGetBool(o.fs.name, "set_modtime", true) {
 		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
 		o.fs.putSftpConnection(&c, err)
 		if err != nil {
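
The sftp backend above builds its hash set dynamically after probing the server. A minimal sketch of that pattern with the relocated `fs/hash` constructors; the probe results are passed in as booleans here:

    package example

    import "github.com/ncw/rclone/fs/hash"

    // supportedHashes converts probe results into a hash.Set.
    func supportedHashes(md5Works, sha1Works bool) hash.Set {
    	set := hash.NewHashSet()
    	if !md5Works && !sha1Works {
    		set.Add(hash.HashNone)
    	}
    	if md5Works {
    		set.Add(hash.HashMD5)
    	}
    	if sha1Works {
    		set.Add(hash.HashSHA1)
    	}
    	return set
    }
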
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index da058e3be..7f3037454 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -14,6 +14,13 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
 )
@@ -118,7 +125,7 @@ func init() {
 		},
 		},
 	})
-	fs.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
+	flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
 }
 
 // Fs represents a remote swift server
@@ -191,24 +198,24 @@ func parsePath(path string) (container, directory string, err error) {
 func swiftConnection(name string) (*swift.Connection, error) {
 	c := &swift.Connection{
 		// Keep these in the same order as the Config for ease of checking
-		UserName:       fs.ConfigFileGet(name, "user"),
-		ApiKey:         fs.ConfigFileGet(name, "key"),
-		AuthUrl:        fs.ConfigFileGet(name, "auth"),
-		UserId:         fs.ConfigFileGet(name, "user_id"),
-		Domain:         fs.ConfigFileGet(name, "domain"),
-		Tenant:         fs.ConfigFileGet(name, "tenant"),
-		TenantId:       fs.ConfigFileGet(name, "tenant_id"),
-		TenantDomain:   fs.ConfigFileGet(name, "tenant_domain"),
-		Region:         fs.ConfigFileGet(name, "region"),
-		StorageUrl:     fs.ConfigFileGet(name, "storage_url"),
-		AuthToken:      fs.ConfigFileGet(name, "auth_token"),
-		AuthVersion:    fs.ConfigFileGetInt(name, "auth_version", 0),
-		EndpointType:   swift.EndpointType(fs.ConfigFileGet(name, "endpoint_type", "public")),
+		UserName:       config.FileGet(name, "user"),
+		ApiKey:         config.FileGet(name, "key"),
+		AuthUrl:        config.FileGet(name, "auth"),
+		UserId:         config.FileGet(name, "user_id"),
+		Domain:         config.FileGet(name, "domain"),
+		Tenant:         config.FileGet(name, "tenant"),
+		TenantId:       config.FileGet(name, "tenant_id"),
+		TenantDomain:   config.FileGet(name, "tenant_domain"),
+		Region:         config.FileGet(name, "region"),
+		StorageUrl:     config.FileGet(name, "storage_url"),
+		AuthToken:      config.FileGet(name, "auth_token"),
+		AuthVersion:    config.FileGetInt(name, "auth_version", 0),
+		EndpointType:   swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
 		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
 		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fs.Config.Transport(),
+		Transport:      fshttp.NewTransport(fs.Config),
 	}
-	if fs.ConfigFileGetBool(name, "env_auth", false) {
+	if config.FileGetBool(name, "env_auth", false) {
 		err := c.ApplyEnvironment()
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read environment variables")
@@ -466,7 +473,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	if f.container == "" {
 		return errors.New("container needed for recursive list")
 	}
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.list(dir, true, func(entry fs.DirEntry) error {
 		return list.Add(entry)
 	})
@@ -549,7 +556,7 @@ func (f *Fs) Purge() error {
 	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
 	delErr := make(chan error, 1)
 	go func() {
-		delErr <- fs.DeleteFiles(toBeDeleted)
+		delErr <- operations.DeleteFiles(toBeDeleted)
 	}()
 	err := f.list("", true, func(entry fs.DirEntry) error {
 		if o, ok := entry.(*Object); ok {
@@ -596,8 +603,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // ------------------------------------------------------------
@@ -621,9 +628,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	isDynamicLargeObject, err := o.isDynamicLargeObject()
 	if err != nil {
@@ -855,7 +862,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 // The new object may have been created if an error is returned
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	if o.fs.container == "" {
-		return fs.FatalError(errors.New("container name needed in remote"))
+		return fserrors.FatalError(errors.New("container name needed in remote"))
 	}
 	err := o.fs.Mkdir("")
 	if err != nil {
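
The swift `Purge` above now streams objects into `operations.DeleteFiles` over a channel. A minimal sketch of that producer/consumer pattern; the slice-driven producer is illustrative:

    package example

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/operations"
    )

    // deleteAll feeds objects to operations.DeleteFiles, which consumes
    // the channel concurrently, and waits for the result.
    func deleteAll(objs []fs.Object) error {
    	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
    	delErr := make(chan error, 1)
    	go func() {
    		delErr <- operations.DeleteFiles(toBeDeleted)
    	}()
    	for _, o := range objs {
    		toBeDeleted <- o
    	}
    	close(toBeDeleted)
    	return <-delErr
    }
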
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 2d963e580..e4a3d264e 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -30,11 +30,12 @@ import (
 
 	"github.com/ncw/rclone/backend/webdav/api"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/rclone/lib/rest"
-	"github.com/ncw/rclone/pacer"
-	"github.com/ncw/rclone/rest"
-	"github.com/ncw/rclone/webdav/api"
 	"github.com/pkg/errors"
 )
 
@@ -159,7 +160,7 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried.  It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
-	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
 // itemIsDir returns true if the item is a directory
@@ -250,21 +251,21 @@ func (o *Object) filePath() string {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
-	endpoint := fs.ConfigFileGet(name, "url")
+	endpoint := config.FileGet(name, "url")
 	if !strings.HasSuffix(endpoint, "/") {
 		endpoint += "/"
 	}
 
-	user := fs.ConfigFileGet(name, "user")
-	pass := fs.ConfigFileGet(name, "pass")
+	user := config.FileGet(name, "user")
+	pass := config.FileGet(name, "pass")
 	if pass != "" {
 		var err error
-		pass, err = fs.Reveal(pass)
+		pass, err = config.Reveal(pass)
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
-	vendor := fs.ConfigFileGet(name, "vendor")
+	vendor := config.FileGet(name, "vendor")
 
 	// Parse the endpoint
 	u, err := url.Parse(endpoint)
@@ -277,7 +278,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		root:        root,
 		endpoint:    u,
 		endpointURL: u.String(),
-		srv:         rest.NewClient(fs.Config.Client()).SetRoot(u.String()).SetUserPass(user, pass),
+		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()).SetUserPass(user, pass),
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		user:        user,
 		pass:        pass,
@@ -765,8 +766,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashNone)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashNone)
 }
 
 // ------------------------------------------------------------
@@ -790,9 +791,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashSHA1 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashSHA1 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.sha1, nil
 }
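
The webdav changes collect the other recurring renames: fs.ConfigFileGet becomes config.FileGet, fs.Reveal becomes config.Reveal, and HTTP clients are now built with fshttp.NewClient(fs.Config) before being wrapped by rest.NewClient. A minimal sketch of the new wiring (newRestClient is a hypothetical helper, not code from this commit):

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/config"
    	"github.com/ncw/rclone/fs/fshttp"
    	"github.com/ncw/rclone/lib/rest"
    )

    // newRestClient reads credentials from the config file and builds a
    // REST client whose http.Client still honours the global fs.Config
    // options such as timeouts and proxies.
    func newRestClient(name, endpoint string) (*rest.Client, error) {
    	user := config.FileGet(name, "user")
    	pass := config.FileGet(name, "pass")
    	if pass != "" {
    		var err error
    		pass, err = config.Reveal(pass) // passwords are stored obscured
    		if err != nil {
    			return nil, err
    		}
    	}
    	return rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(endpoint).SetUserPass(user, pass), nil
    }
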
diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go
index e028a0f18..9586f0745 100644
--- a/backend/yandex/yandex.go
+++ b/backend/yandex/yandex.go
@@ -15,9 +15,11 @@ import (
 
 	yandex "github.com/ncw/rclone/backend/yandex/api"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/oauthutil"
-	"github.com/ncw/rclone/oauthutil"
-	yandex "github.com/ncw/rclone/yandex/api"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 )
@@ -37,7 +39,7 @@ var (
 			TokenURL: "https://oauth.yandex.com/token",     //same as https://oauth.yandex.ru/token
 		},
 		ClientID:     rcloneClientID,
-		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
+		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
 	}
 )
@@ -55,10 +57,10 @@ func init() {
 			}
 		},
 		Options: []fs.Option{{
-			Name: fs.ConfigClientID,
+			Name: config.ConfigClientID,
 			Help: "Yandex Client Id - leave blank normally.",
 		}, {
-			Name: fs.ConfigClientSecret,
+			Name: config.ConfigClientSecret,
 			Help: "Yandex Client Secret - leave blank normally.",
 		}},
 	})
@@ -109,7 +111,7 @@ func (f *Fs) Features() *fs.Features {
 // read access token from ConfigFile string
 func getAccessToken(name string) (*oauth2.Token, error) {
 	// Read the token from the config file
-	tokenConfig := fs.ConfigFileGet(name, "token")
+	tokenConfig := config.FileGet(name, "token")
 	//Get access token from config string
 	decoder := json.NewDecoder(strings.NewReader(tokenConfig))
 	var result *oauth2.Token
@@ -129,7 +131,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}
 
 	//create new client
-	yandexDisk := yandex.NewClient(token.AccessToken, fs.Config.Client())
+	yandexDisk := yandex.NewClient(token.AccessToken, fshttp.NewClient(fs.Config))
 
 	f := &Fs{
 		name: name,
@@ -487,8 +489,8 @@ func (f *Fs) CleanUp() error {
 }
 
 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
-	return fs.HashSet(fs.HashMD5)
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.HashMD5)
 }
 
 // ------------------------------------------------------------
@@ -512,9 +514,9 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t fs.HashType) (string, error) {
-	if t != fs.HashMD5 {
-		return "", fs.ErrHashUnsupported
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.HashMD5 {
+		return "", hash.ErrHashUnsupported
 	}
 	return o.md5sum, nil
 }
@@ -578,7 +580,7 @@ func (o *Object) remotePath() string {
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	in := fs.NewCountingReader(in0)
+	in := readers.NewCountingReader(in0)
 	modTime := src.ModTime()
 
 	remote := o.remotePath()
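
In yandex the upload path now counts bytes with the relocated reader from lib/readers. A sketch of the idiom (countedCopy is a hypothetical function, and BytesRead is assumed from the counting reader's usual interface; only NewCountingReader appears in the hunk above):

    import (
    	"io"

    	"github.com/ncw/rclone/lib/readers"
    )

    // countedCopy copies r to w, reporting how many bytes were read.
    func countedCopy(w io.Writer, r io.Reader) (uint64, error) {
    	in := readers.NewCountingReader(r) // was fs.NewCountingReader
    	_, err := io.Copy(w, in)
    	return in.BytesRead(), err
    }
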
diff --git a/cmd/authorize/authorize.go b/cmd/authorize/authorize.go
index 5661ad52e..745927c8e 100644
--- a/cmd/authorize/authorize.go
+++ b/cmd/authorize/authorize.go
@@ -2,7 +2,7 @@ package authorize
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/spf13/cobra"
 )
 
@@ -19,6 +19,6 @@ rclone from a machine with a browser - use as instructed by
 rclone config.`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 3, command, args)
-		fs.Authorize(args)
+		config.Authorize(args)
 	},
 }
diff --git a/cmd/cachestats/cachestats.go b/cmd/cachestats/cachestats.go
index d47291016..617ef1253 100644
--- a/cmd/cachestats/cachestats.go
+++ b/cmd/cachestats/cachestats.go
@@ -9,6 +9,7 @@ import (
 	"github.com/ncw/rclone/backend/cache"
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -32,9 +33,9 @@ Print cache stats for a remote in JSON format
 			return
 		}
 
-		if !fs.ConfigFileGetBool(configName, "read_only", false) {
-			fs.ConfigFileSet(configName, "read_only", "true")
-			defer fs.ConfigFileDeleteKey(configName, "read_only")
+		if !config.FileGetBool(configName, "read_only", false) {
+			config.FileSet(configName, "read_only", "true")
+			defer config.FileDeleteKey(configName, "read_only")
 		}
 
 		fsrc := cmd.NewFsSrc(args)
diff --git a/cmd/cat/cat.go b/cmd/cat/cat.go
index 137e05631..4ad188021 100644
--- a/cmd/cat/cat.go
+++ b/cmd/cat/cat.go
@@ -7,7 +7,7 @@ import (
 	"os"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -74,7 +74,7 @@ Note that if offset is negative it will count from the end, so
 			w = ioutil.Discard
 		}
 		cmd.Run(false, false, command, func() error {
-			return fs.Cat(fsrc, w, offset, count)
+			return operations.Cat(fsrc, w, offset, count)
 		})
 	},
 }
diff --git a/cmd/check/check.go b/cmd/check/check.go
index a95df902c..ebace4f2a 100644
--- a/cmd/check/check.go
+++ b/cmd/check/check.go
@@ -2,7 +2,7 @@ package check
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -37,9 +37,9 @@ to check all the data.
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(false, false, command, func() error {
 			if download {
-				return fs.CheckDownload(fdst, fsrc)
+				return operations.CheckDownload(fdst, fsrc)
 			}
-			return fs.Check(fdst, fsrc)
+			return operations.Check(fdst, fsrc)
 		})
 	},
 }
diff --git a/cmd/cleanup/cleanup.go b/cmd/cleanup/cleanup.go
index 696d9f0e4..c1b894e99 100644
--- a/cmd/cleanup/cleanup.go
+++ b/cmd/cleanup/cleanup.go
@@ -2,7 +2,7 @@ package cleanup
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -21,7 +21,7 @@ versions. Not supported by all remotes.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.CleanUp(fsrc)
+			return operations.CleanUp(fsrc)
 		})
 	},
 }
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 14e31312e..481cc93ee 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -21,17 +21,26 @@ import (
 	"github.com/spf13/pflag"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configflags"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/filter/filterflags"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fspath"
+	fslog "github.com/ncw/rclone/fs/log"
 )
 
 // Globals
 var (
 	// Flags
-	cpuProfile    = fs.StringP("cpuprofile", "", "", "Write cpu profile to file")
-	memProfile    = fs.StringP("memprofile", "", "", "Write memory profile to file")
-	statsInterval = fs.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
-	dataRateUnit  = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
+	cpuProfile    = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
+	memProfile    = flags.StringP("memprofile", "", "", "Write memory profile to file")
+	statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
+	dataRateUnit  = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
 	version       bool
-	retries       = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
+	retries       = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
 	// Errors
 	errorCommandNotFound    = errors.New("command not found")
 	errorUncategorized      = errors.New("uncategorized error")
@@ -113,6 +122,10 @@ func runRoot(cmd *cobra.Command, args []string) {
 }
 
 func init() {
+	// Add global flags
+	configflags.AddFlags(pflag.CommandLine)
+	filterflags.AddFlags(pflag.CommandLine)
+
 	Root.Run = runRoot
 	Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
 	cobra.OnInitialize(initConfig)
@@ -131,7 +144,7 @@ func ShowVersion() {
 func newFsFile(remote string) (fs.Fs, string) {
 	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
 	if err != nil {
-		fs.Stats.Error(err)
+		fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
 	}
 	f, err := fsInfo.NewFs(configName, fsPath)
@@ -141,7 +154,7 @@ func newFsFile(remote string) (fs.Fs, string) {
 	case nil:
 		return f, ""
 	default:
-		fs.Stats.Error(err)
+		fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
 	}
 	return nil, ""
@@ -155,15 +168,15 @@ func newFsFile(remote string) (fs.Fs, string) {
 func newFsSrc(remote string) (fs.Fs, string) {
 	f, fileName := newFsFile(remote)
 	if fileName != "" {
-		if !fs.Config.Filter.InActive() {
+		if !filter.Active.InActive() {
 			err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			log.Fatalf(err.Error())
 		}
 		// Limit transfers to this file
-		err := fs.Config.Filter.AddFile(fileName)
+		err := filter.Active.AddFile(fileName)
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			log.Fatalf("Failed to limit to single file %q: %v", remote, err)
 		}
 		// Set --no-traverse as only one file
@@ -178,7 +191,7 @@ func newFsSrc(remote string) (fs.Fs, string) {
 func newFsDst(remote string) fs.Fs {
 	f, err := fs.NewFs(remote)
 	if err != nil {
-		fs.Stats.Error(err)
+		fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
 	}
 	return f
@@ -201,7 +214,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 	// If file exists then srcFileName != "", however if the file
 	// doesn't exist then we assume it is a directory...
 	if srcFileName != "" {
-		dstRemote, dstFileName = fs.RemoteSplit(dstRemote)
+		dstRemote, dstFileName = fspath.RemoteSplit(dstRemote)
 		if dstRemote == "" {
 			dstRemote = "."
 		}
@@ -212,11 +225,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 	fdst, err := fs.NewFs(dstRemote)
 	switch err {
 	case fs.ErrorIsFile:
-		fs.Stats.Error(err)
+		fs.CountError(err)
 		log.Fatalf("Source doesn't exist or is a directory and destination is a file")
 	case nil:
 	default:
-		fs.Stats.Error(err)
+		fs.CountError(err)
 		log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
 	}
 	fs.CalculateModifyWindow(fdst, fsrc)
@@ -241,7 +254,7 @@ func NewFsDst(args []string) fs.Fs {
 
 // NewFsDstFile creates a new dst fs with a destination file name from the arguments
 func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
-	dstRemote, dstFileName := fs.RemoteSplit(args[0])
+	dstRemote, dstFileName := fspath.RemoteSplit(args[0])
 	if dstRemote == "" {
 		dstRemote = "."
 	}
@@ -274,27 +287,27 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 	}
 	for try := 1; try <= *retries; try++ {
 		err = f()
-		if !Retry || (err == nil && !fs.Stats.Errored()) {
+		if !Retry || (err == nil && !accounting.Stats.Errored()) {
 			if try > 1 {
 				fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
 			}
 			break
 		}
-		if fs.IsFatalError(err) {
+		if fserrors.IsFatalError(err) {
 			fs.Errorf(nil, "Fatal error received - not attempting retries")
 			break
 		}
-		if fs.IsNoRetryError(err) {
+		if fserrors.IsNoRetryError(err) {
 			fs.Errorf(nil, "Can't retry this error - not attempting retries")
 			break
 		}
 		if err != nil {
-			fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
+			fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), err)
 		} else {
-			fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
+			fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, accounting.Stats.GetErrors())
 		}
 		if try < *retries {
-			fs.Stats.ResetErrors()
+			accounting.Stats.ResetErrors()
 		}
 	}
 	if showStats {
@@ -304,12 +317,12 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 		log.Printf("Failed to %s: %v", cmd.Name(), err)
 		resolveExitCode(err)
 	}
-	if showStats && (fs.Stats.Errored() || *statsInterval > 0) {
-		fs.Stats.Log()
+	if showStats && (accounting.Stats.Errored() || *statsInterval > 0) {
+		accounting.Stats.Log()
 	}
 	fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
-	if fs.Stats.Errored() {
-		resolveExitCode(fs.Stats.GetLastError())
+	if accounting.Stats.Errored() {
+		resolveExitCode(accounting.Stats.GetLastError())
 	}
 }
 
@@ -339,7 +352,7 @@ func StartStats() chan struct{} {
 			for {
 				select {
 				case <-ticker.C:
-					fs.Stats.Log()
+					accounting.Stats.Log()
 				case <-stopStats:
 					ticker.Stop()
 					return
@@ -353,10 +366,20 @@ func StartStats() chan struct{} {
 // initConfig is run by cobra after initialising the flags
 func initConfig() {
 	// Start the logger
-	fs.InitLogging()
+	fslog.InitLogging()
+
+	// Finish parsing any command line flags
+	configflags.SetFlags()
 
 	// Load the rest of the config now we have started the logger
-	fs.LoadConfig()
+	config.LoadConfig()
+
+	// Load filters
+	var err error
+	filter.Active, err = filter.NewFilter(&filterflags.Opt)
+	if err != nil {
+		log.Fatalf("Failed to load filters: %v", err)
+	}
 
 	// Write the args for debug purposes
 	fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
@@ -366,12 +389,12 @@ func initConfig() {
 		fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
 		f, err := os.Create(*cpuProfile)
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			log.Fatal(err)
 		}
 		err = pprof.StartCPUProfile(f)
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			log.Fatal(err)
 		}
 		AtExit(func() {
@@ -385,17 +408,17 @@ func initConfig() {
 			fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
 			f, err := os.Create(*memProfile)
 			if err != nil {
-				fs.Stats.Error(err)
+				fs.CountError(err)
 				log.Fatal(err)
 			}
 			err = pprof.WriteHeapProfile(f)
 			if err != nil {
-				fs.Stats.Error(err)
+				fs.CountError(err)
 				log.Fatal(err)
 			}
 			err = f.Close()
 			if err != nil {
-				fs.Stats.Error(err)
+				fs.CountError(err)
 				log.Fatal(err)
 			}
 		})
@@ -423,11 +446,11 @@ func resolveExitCode(err error) {
 		os.Exit(exitCodeFileNotFound)
 	case err == errorUncategorized:
 		os.Exit(exitCodeUncategorizedError)
-	case fs.ShouldRetry(err):
+	case fserrors.ShouldRetry(err):
 		os.Exit(exitCodeRetryError)
-	case fs.IsNoRetryError(err):
+	case fserrors.IsNoRetryError(err):
 		os.Exit(exitCodeNoRetryError)
-	case fs.IsFatalError(err):
+	case fserrors.IsFatalError(err):
 		os.Exit(exitCodeFatalError)
 	default:
 		os.Exit(exitCodeUsageError)
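
cmd/cmd.go shows the new startup contract in one place: flag registration moves to the configflags and filterflags packages, the active filter becomes an explicit package variable, and error counting goes through fs.CountError instead of fs.Stats.Error. Condensed from the hunks above into one sketch:

    import (
    	"log"

    	"github.com/ncw/rclone/fs/config"
    	"github.com/ncw/rclone/fs/config/configflags"
    	"github.com/ncw/rclone/fs/filter"
    	"github.com/ncw/rclone/fs/filter/filterflags"
    	fslog "github.com/ncw/rclone/fs/log"
    	"github.com/spf13/pflag"
    )

    func init() {
    	// register the global and filter flags on the root flag set
    	configflags.AddFlags(pflag.CommandLine)
    	filterflags.AddFlags(pflag.CommandLine)
    }

    // initConfig runs after cobra has parsed the command line
    func initConfig() {
    	fslog.InitLogging()    // was fs.InitLogging
    	configflags.SetFlags() // finish off the command line flag parsing
    	config.LoadConfig()    // was fs.LoadConfig
    	var err error
    	filter.Active, err = filter.NewFilter(&filterflags.Opt)
    	if err != nil {
    		log.Fatalf("Failed to load filters: %v", err)
    	}
    }
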
diff --git a/cmd/cmount/fs.go b/cmd/cmount/fs.go
index 5af4acf06..8b159713d 100644
--- a/cmd/cmount/fs.go
+++ b/cmd/cmount/fs.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/billziss-gh/cgofuse/fuse"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/pkg/errors"
@@ -178,7 +179,7 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
 
 // Init is called after the filesystem is ready
 func (fsys *FS) Init() {
-	defer fs.Trace(fsys.f, "")("")
+	defer log.Trace(fsys.f, "")("")
 	close(fsys.ready)
 }
 
@@ -186,12 +187,12 @@ func (fsys *FS) Init() {
 // the file system is terminated the file system may not receive the
 // Destroy call).
 func (fsys *FS) Destroy() {
-	defer fs.Trace(fsys.f, "")("")
+	defer log.Trace(fsys.f, "")("")
 }
 
 // Getattr reads the attributes for path
 func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
-	defer fs.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
+	defer log.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
 	node, _, errc := fsys.getNode(path, fh)
 	if errc == 0 {
 		errc = fsys.stat(node, stat)
@@ -201,7 +202,7 @@ func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
 
 // Opendir opens path as a directory
 func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
-	defer fs.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
+	defer log.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
 	handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777)
 	if err != nil {
 		return translateError(err), fhUnset
@@ -215,7 +216,7 @@ func (fsys *FS) Readdir(dirPath string,
 	ofst int64,
 	fh uint64) (errc int) {
 	itemsRead := -1
-	defer fs.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
+	defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
 
 	node, errc := fsys.getHandle(fh)
 	if errc != 0 {
@@ -254,13 +255,13 @@ func (fsys *FS) Readdir(dirPath string,
 
 // Releasedir finishes reading the directory
 func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
-	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
+	defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
 	return fsys.closeHandle(fh)
 }
 
 // Statfs reads overall stats on the filesystem
 func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
-	defer fs.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
+	defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
 	const blockSize = 4096
 	fsBlocks := uint64(1 << 50)
 	if runtime.GOOS == "windows" {
@@ -279,7 +280,7 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
 
 // Open opens a file
 func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
-	defer fs.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
+	defer log.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
 
 	// translate the fuse flags to os flags
 	flags = translateOpenFlags(flags) | os.O_CREATE
@@ -293,7 +294,7 @@ func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
 
 // Create creates and opens a file.
 func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
-	defer fs.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
+	defer log.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
 	leaf, parentDir, errc := fsys.lookupParentDir(filePath)
 	if errc != 0 {
 		return errc, fhUnset
@@ -313,7 +314,7 @@ func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh ui
 
 // Truncate truncates a file to size
 func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
-	defer fs.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
+	defer log.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
 	node, handle, errc := fsys.getNode(path, fh)
 	if errc != 0 {
 		return errc
@@ -332,7 +333,7 @@ func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
 
 // Read data from file handle
 func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
-	defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
+	defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
 	handle, errc := fsys.getHandle(fh)
 	if errc != 0 {
 		return errc
@@ -348,7 +349,7 @@ func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
 
 // Write data to file handle
 func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
-	defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
+	defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
 	handle, errc := fsys.getHandle(fh)
 	if errc != 0 {
 		return errc
@@ -362,7 +363,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
 
 // Flush flushes an open file descriptor or path
 func (fsys *FS) Flush(path string, fh uint64) (errc int) {
-	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
+	defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
 	handle, errc := fsys.getHandle(fh)
 	if errc != 0 {
 		return errc
@@ -372,7 +373,7 @@ func (fsys *FS) Flush(path string, fh uint64) (errc int) {
 
 // Release closes the file if still open
 func (fsys *FS) Release(path string, fh uint64) (errc int) {
-	defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
+	defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
 	handle, errc := fsys.getHandle(fh)
 	if errc != 0 {
 		return errc
@@ -383,7 +384,7 @@ func (fsys *FS) Release(path string, fh uint64) (errc int) {
 
 // Unlink removes a file.
 func (fsys *FS) Unlink(filePath string) (errc int) {
-	defer fs.Trace(filePath, "")("errc=%d", &errc)
+	defer log.Trace(filePath, "")("errc=%d", &errc)
 	leaf, parentDir, errc := fsys.lookupParentDir(filePath)
 	if errc != 0 {
 		return errc
@@ -393,7 +394,7 @@ func (fsys *FS) Unlink(filePath string) (errc int) {
 
 // Mkdir creates a directory.
 func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
-	defer fs.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
+	defer log.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
 	leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
 	if errc != 0 {
 		return errc
@@ -404,7 +405,7 @@ func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
 
 // Rmdir removes a directory
 func (fsys *FS) Rmdir(dirPath string) (errc int) {
-	defer fs.Trace(dirPath, "")("errc=%d", &errc)
+	defer log.Trace(dirPath, "")("errc=%d", &errc)
 	leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
 	if errc != 0 {
 		return errc
@@ -414,13 +415,13 @@ func (fsys *FS) Rmdir(dirPath string) (errc int) {
 
 // Rename renames a file.
 func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
-	defer fs.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
+	defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
 	return translateError(fsys.VFS.Rename(oldPath, newPath))
 }
 
 // Utimens changes the access and modification times of a file.
 func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
-	defer fs.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
+	defer log.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
 	node, errc := fsys.lookupNode(path)
 	if errc != 0 {
 		return errc
@@ -436,59 +437,59 @@ func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
 
 // Mknod creates a file node.
 func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
-	defer fs.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
+	defer log.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
 	return -fuse.ENOSYS
 }
 
 // Fsync synchronizes file contents.
 func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
-	defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
+	defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
 	// This is a no-op for rclone
 	return 0
 }
 
 // Link creates a hard link to a file.
 func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
-	defer fs.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
+	defer log.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
 	return -fuse.ENOSYS
 }
 
 // Symlink creates a symbolic link.
 func (fsys *FS) Symlink(target string, newpath string) (errc int) {
-	defer fs.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
+	defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
 	return -fuse.ENOSYS
 }
 
 // Readlink reads the target of a symbolic link.
 func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
-	defer fs.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
+	defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
 	return -fuse.ENOSYS, ""
 }
 
 // Chmod changes the permission bits of a file.
 func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
-	defer fs.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
+	defer log.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
 	// This is a no-op for rclone
 	return 0
 }
 
 // Chown changes the owner and group of a file.
 func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
-	defer fs.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
+	defer log.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
 	// This is a no-op for rclone
 	return 0
 }
 
 // Access checks file access permissions.
 func (fsys *FS) Access(path string, mask uint32) (errc int) {
-	defer fs.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
+	defer log.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
 	// This is a no-op for rclone
 	return 0
 }
 
 // Fsyncdir synchronizes directory contents.
 func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
-	defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
+	defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
 	// This is a no-op for rclone
 	return 0
 }
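
Every hunk in cmount/fs.go is the same rename, so the calling convention of the relocated tracer is worth spelling out once: log.Trace logs entry and returns a function; deferring that function logs exit, and because the named results are passed by pointer the exit line shows their final values. A self-contained sketch (double is a made-up function used only for illustration):

    import "github.com/ncw/rclone/fs/log"

    // double logs "n=2" on entry and "result=4" on exit when debug
    // logging is enabled.
    func double(n int) (result int) {
    	defer log.Trace(nil, "n=%d", n)("result=%d", &result)
    	result = n * 2
    	return result
    }
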
diff --git a/cmd/config/config.go b/cmd/config/config.go
index 90033dedd..09216481b 100644
--- a/cmd/config/config.go
+++ b/cmd/config/config.go
@@ -2,7 +2,7 @@ package config
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/spf13/cobra"
 )
 
@@ -28,7 +28,7 @@ password to protect your configuration.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 0, command, args)
-		fs.EditConfig()
+		config.EditConfig()
 	},
 }
 
@@ -44,7 +44,7 @@ var configFileCommand = &cobra.Command{
 	Short: `Show path of configuration file in use.`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 0, command, args)
-		fs.ShowConfigLocation()
+		config.ShowConfigLocation()
 	},
 }
 
@@ -54,9 +54,9 @@ var configShowCommand = &cobra.Command{
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		if len(args) == 0 {
-			fs.ShowConfig()
+			config.ShowConfig()
 		} else {
-			fs.ShowRemote(args[0])
+			config.ShowRemote(args[0])
 		}
 	},
 }
@@ -66,7 +66,7 @@ var configDumpCommand = &cobra.Command{
 	Short: `Dump the config file as JSON.`,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(0, 0, command, args)
-		return fs.ConfigDump()
+		return config.Dump()
 	},
 }
 
@@ -75,7 +75,7 @@ var configProvidersCommand = &cobra.Command{
 	Short: `List in JSON format all the providers and options.`,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(0, 0, command, args)
-		return fs.JSONListProviders()
+		return config.JSONListProviders()
 	},
 }
 
@@ -93,7 +93,7 @@ you would do:
 `,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 256, command, args)
-		return fs.CreateRemote(args[0], args[1], args[2:])
+		return config.CreateRemote(args[0], args[1], args[2:])
 	},
 }
 
@@ -110,7 +110,7 @@ For example to update the env_auth field of a remote of name myremote you would
 `,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(3, 256, command, args)
-		return fs.UpdateRemote(args[0], args[1:])
+		return config.UpdateRemote(args[0], args[1:])
 	},
 }
 
@@ -119,7 +119,7 @@ var configDeleteCommand = &cobra.Command{
 	Short: `Delete an existing remote <name>.`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
-		fs.DeleteRemote(args[0])
+		config.DeleteRemote(args[0])
 	},
 }
 
@@ -136,6 +136,6 @@ For example to set password of a remote of name myremote you would do:
 `,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(3, 256, command, args)
-		return fs.PasswordRemote(args[0], args[1:])
+		return config.PasswordRemote(args[0], args[1:])
 	},
 }
diff --git a/cmd/copy/copy.go b/cmd/copy/copy.go
index 920b80cff..a1c59c4f1 100644
--- a/cmd/copy/copy.go
+++ b/cmd/copy/copy.go
@@ -2,7 +2,7 @@ package copy
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/sync"
 	"github.com/spf13/cobra"
 )
 
@@ -57,7 +57,7 @@ the destination directory or not.
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(true, true, command, func() error {
-			return fs.CopyDir(fdst, fsrc)
+			return sync.CopyDir(fdst, fsrc)
 		})
 	},
 }
diff --git a/cmd/copyto/copyto.go b/cmd/copyto/copyto.go
index 8ea8f2296..254f3ec9c 100644
--- a/cmd/copyto/copyto.go
+++ b/cmd/copyto/copyto.go
@@ -2,7 +2,8 @@ package copyto
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/sync"
 	"github.com/spf13/cobra"
 )
 
@@ -45,9 +46,9 @@ destination.
 		fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
 		cmd.Run(true, true, command, func() error {
 			if srcFileName == "" {
-				return fs.CopyDir(fdst, fsrc)
+				return sync.CopyDir(fdst, fsrc)
 			}
-			return fs.CopyFile(fdst, fsrc, dstFileName, srcFileName)
+			return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
 		})
 	},
 }
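
copyto makes the new package split visible: whole-tree transfers come from fs/sync while single-file primitives come from fs/operations, where before both were fs.CopyDir and fs.CopyFile. The dispatch reads naturally as (copyToDispatch is a hypothetical wrapper around the calls shown above):

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/operations"
    	"github.com/ncw/rclone/fs/sync"
    )

    // copyToDispatch copies a whole directory tree unless a single
    // source file was named on the command line.
    func copyToDispatch(fdst, fsrc fs.Fs, dstFileName, srcFileName string) error {
    	if srcFileName == "" {
    		return sync.CopyDir(fdst, fsrc)
    	}
    	return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
    }
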
diff --git a/cmd/cryptcheck/cryptcheck.go b/cmd/cryptcheck/cryptcheck.go
index 0a387a907..f678d7fac 100644
--- a/cmd/cryptcheck/cryptcheck.go
+++ b/cmd/cryptcheck/cryptcheck.go
@@ -4,6 +4,8 @@ import (
 	"github.com/ncw/rclone/backend/crypt"
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -58,7 +60,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 	// Find a hash to use
 	funderlying := fcrypt.UnWrap()
 	hashType := funderlying.Hashes().GetOne()
-	if hashType == fs.HashNone {
+	if hashType == hash.HashNone {
 		return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
 	}
 	fs.Infof(nil, "Using %v for hash comparisons", hashType)
@@ -72,7 +74,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 		underlyingDst := cryptDst.UnWrap()
 		underlyingHash, err := underlyingDst.Hash(hashType)
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
 			return true, false
 		}
@@ -81,7 +83,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 		}
 		cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			fs.Errorf(dst, "Error computing hash: %v", err)
 			return true, false
 		}
@@ -90,7 +92,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 		}
 		if cryptHash != underlyingHash {
 			err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			fs.Errorf(src, err.Error())
 			return true, false
 		}
@@ -98,5 +100,5 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 		return false, false
 	}
 
-	return fs.CheckFn(fcrypt, fsrc, checkIdentical)
+	return operations.CheckFn(fcrypt, fsrc, checkIdentical)
 }
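
The hash negotiation at the top of cryptCheck is a pattern commands and backends share: ask the Fs which hashes it supports and pick one, treating hash.HashNone as "no usable hash". As a standalone sketch (pickHash is a hypothetical helper distilled from the hunk above):

    import (
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/hash"
    	"github.com/pkg/errors"
    )

    // pickHash returns any hash type f supports, or an error if none.
    func pickHash(f fs.Fs) (hash.Type, error) {
    	ht := f.Hashes().GetOne()
    	if ht == hash.HashNone {
    		return ht, errors.Errorf("%s:%s does not support any hashes", f.Name(), f.Root())
    	}
    	return ht, nil
    }
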
diff --git a/cmd/cryptdecode/cryptdecode.go b/cmd/cryptdecode/cryptdecode.go
index be327fd5a..28cd351e8 100644
--- a/cmd/cryptdecode/cryptdecode.go
+++ b/cmd/cryptdecode/cryptdecode.go
@@ -6,6 +6,7 @@ import (
 	"github.com/ncw/rclone/backend/crypt"
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -17,8 +18,8 @@ var (
 
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
-	flags := commandDefinition.Flags()
-	fs.BoolVarP(flags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
+	flagSet := commandDefinition.Flags()
+	flags.BoolVarP(flagSet, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
 }
 
 var commandDefinition = &cobra.Command{
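
Note the rename of the local variable in init above: the pflag set returned by Flags() used to be called flags, which would now shadow the imported fs/config/flags package, so it becomes flagSet. The same rename recurs in mountlib below. In sketch form (addFlags and reverse are illustrative names):

    import (
    	"github.com/ncw/rclone/fs/config/flags"
    	"github.com/spf13/cobra"
    )

    var reverse bool

    // addFlags registers command flags; the local set is named flagSet
    // so it cannot shadow the flags helper package.
    func addFlags(cmd *cobra.Command) {
    	flagSet := cmd.Flags()
    	flags.BoolVarP(flagSet, &reverse, "reverse", "", false, "Reverse cryptdecode, encrypts filenames")
    }
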
diff --git a/cmd/dbhashsum/dbhashsum.go b/cmd/dbhashsum/dbhashsum.go
index 3d7f82f13..d49ab4161 100644
--- a/cmd/dbhashsum/dbhashsum.go
+++ b/cmd/dbhashsum/dbhashsum.go
@@ -4,7 +4,7 @@ import (
 	"os"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -25,7 +25,7 @@ The output is in the same format as md5sum and sha1sum.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.DropboxHashSum(fsrc, os.Stdout)
+			return operations.DropboxHashSum(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/dedupe/dedupe.go b/cmd/dedupe/dedupe.go
index 365fc033f..1a578da8d 100644
--- a/cmd/dedupe/dedupe.go
+++ b/cmd/dedupe/dedupe.go
@@ -4,12 +4,12 @@ import (
 	"log"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
 var (
-	dedupeMode = fs.DeduplicateInteractive
+	dedupeMode = operations.DeduplicateInteractive
 )
 
 func init() {
@@ -111,7 +111,7 @@ Or
 		}
 		fdst := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.Deduplicate(fdst, dedupeMode)
+			return operations.Deduplicate(fdst, dedupeMode)
 		})
 	},
 }
diff --git a/cmd/delete/delete.go b/cmd/delete/delete.go
index 98deaea78..b1edc22e2 100644
--- a/cmd/delete/delete.go
+++ b/cmd/delete/delete.go
@@ -2,7 +2,7 @@ package delete
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -35,7 +35,7 @@ delete all files bigger than 100MBytes.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.Delete(fsrc)
+			return operations.Delete(fsrc)
 		})
 	},
 }
diff --git a/cmd/info/info.go b/cmd/info/info.go
index 802cd4580..06385264d 100644
--- a/cmd/info/info.go
+++ b/cmd/info/info.go
@@ -14,6 +14,8 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/object"
 	"github.com/ncw/rclone/fstest"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
@@ -103,7 +105,7 @@ func (r *results) Print() {
 // writeFile writes a file with some random contents
 func (r *results) writeFile(path string) (fs.Object, error) {
 	contents := fstest.RandomString(50)
-	src := fs.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
+	src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
 	return r.f.Put(bytes.NewBufferString(contents), src)
 }
 
@@ -210,10 +212,10 @@ func (r *results) checkStreaming() {
 
 	contents := "thinking of test strings is hard"
 	buf := bytes.NewBufferString(contents)
-	hashIn := fs.NewMultiHasher()
+	hashIn := hash.NewMultiHasher()
 	in := io.TeeReader(buf, hashIn)
 
-	objIn := fs.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
+	objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
 	objR, err := putter(in, objIn)
 	if err != nil {
 		fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
@@ -223,15 +225,15 @@ func (r *results) checkStreaming() {
 
 	hashes := hashIn.Sums()
 	types := objR.Fs().Hashes().Array()
-	for _, hash := range types {
-		sum, err := objR.Hash(hash)
+	for _, Hash := range types {
+		sum, err := objR.Hash(Hash)
 		if err != nil {
-			fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", hash, err)
+			fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err)
 			r.canStream = false
 			return
 		}
-		if !fs.HashEquals(hashes[hash], sum) {
-			fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", hash, hashes[hash], sum)
+		if !hash.Equals(hashes[Hash], sum) {
+			fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", Hash, hashes[Hash], sum)
 			r.canStream = false
 			return
 		}
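
The info command also renames its loop variable from hash to Hash now that hash is a package name. The streaming check itself tees the upload through a MultiHasher and compares sums afterwards; distilled into a sketch (verify is a hypothetical function, and hash.Equals is assumed to treat an empty sum as unknown rather than as a mismatch):

    import (
    	"bytes"
    	"io"
    	"io/ioutil"

    	"github.com/ncw/rclone/fs/hash"
    )

    // verify hashes contents while "uploading" it and checks the MD5.
    func verify(contents, expectedMD5 string) (bool, error) {
    	hashIn := hash.NewMultiHasher()
    	in := io.TeeReader(bytes.NewBufferString(contents), hashIn)
    	if _, err := io.Copy(ioutil.Discard, in); err != nil { // stand-in for the real upload
    		return false, err
    	}
    	sums := hashIn.Sums() // map of hash.Type to lowercase hex string
    	return hash.Equals(sums[hash.HashMD5], expectedMD5), nil
    }
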
diff --git a/cmd/listremotes/listremotes.go b/cmd/listremotes/listremotes.go
index 2e855165c..f7ccd43b2 100644
--- a/cmd/listremotes/listremotes.go
+++ b/cmd/listremotes/listremotes.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/spf13/cobra"
 )
 
@@ -29,7 +30,7 @@ When used with the -l flag it lists the types too.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 0, command, args)
-		remotes := fs.ConfigFileSections()
+		remotes := config.FileSections()
 		sort.Strings(remotes)
 		maxlen := 1
 		for _, remote := range remotes {
diff --git a/cmd/ls/ls.go b/cmd/ls/ls.go
index 4c153d59d..8938bc812 100644
--- a/cmd/ls/ls.go
+++ b/cmd/ls/ls.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/cmd/ls/lshelp"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -24,7 +24,7 @@ readable format with size and path. Recurses by default.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.List(fsrc, os.Stdout)
+			return operations.List(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/lsd/lsd.go b/cmd/lsd/lsd.go
index 9c984f778..c12b38b15 100644
--- a/cmd/lsd/lsd.go
+++ b/cmd/lsd/lsd.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/cmd/ls/lshelp"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -24,7 +24,7 @@ by default.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.ListDir(fsrc, os.Stdout)
+			return operations.ListDir(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go
index 9f549cdc5..392075278 100644
--- a/cmd/lsf/lsf.go
+++ b/cmd/lsf/lsf.go
@@ -8,6 +8,9 @@ import (
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/cmd/ls/lshelp"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -17,7 +20,7 @@ var (
 	separator string
 	dirSlash  bool
 	recurse   bool
-	hashType  = fs.HashMD5
+	hashType  = hash.HashMD5
 	filesOnly bool
 	dirsOnly  bool
 )
@@ -84,7 +87,7 @@ putting it last is a good strategy.
 // Lsf lists all the objects in the path with modification time, size
 // and path in specific format.
 func Lsf(fsrc fs.Fs, out io.Writer) error {
-	var list fs.ListFormat
+	var list operations.ListFormat
 	list.SetSeparator(separator)
 	list.SetDirSlash(dirSlash)
 
@@ -103,9 +106,9 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
 		}
 	}
 
-	return fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
+	return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
 		if err != nil {
-			fs.Stats.Error(err)
+			fs.CountError(err)
 			fs.Errorf(path, "error listing: %v", err)
 			return nil
 		}
@@ -120,7 +123,7 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
 					continue
 				}
 			}
-			fmt.Fprintln(out, fs.ListFormatted(&entry, &list))
+			fmt.Fprintln(out, operations.ListFormatted(&entry, &list))
 		}
 		return nil
 	})
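
Lsf shows walk.Walk's callback contract after the move: the callback receives any listing error, counts it with fs.CountError and returns nil so the walk continues. The formatting side now lives in operations.ListFormat, configured once and applied per entry. A sketch (printEntries is a hypothetical wrapper, and the Add* column methods are assumed from the format-string handling elided in the hunk):

    import (
    	"fmt"
    	"io"

    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/operations"
    )

    // printEntries renders each entry as "size;path" with trailing
    // slashes on directories.
    func printEntries(out io.Writer, entries fs.DirEntries) {
    	var list operations.ListFormat
    	list.SetSeparator(";")
    	list.SetDirSlash(true)
    	list.AddSize()
    	list.AddPath()
    	for _, entry := range entries {
    		fmt.Fprintln(out, operations.ListFormatted(&entry, &list))
    	}
    }
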
diff --git a/cmd/lsf/lsf_test.go b/cmd/lsf/lsf_test.go
index 3b48ca39f..f32a4a674 100644
--- a/cmd/lsf/lsf_test.go
+++ b/cmd/lsf/lsf_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/list"
 	"github.com/ncw/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -137,7 +138,7 @@ file3
 	err = Lsf(f, buf)
 	require.NoError(t, err)
 
-	items, _ := fs.ListDirSorted(f, true, "")
+	items, _ := list.DirSorted(f, true, "")
 	var expectedOutput string
 	for _, item := range items {
 		expectedOutput += item.ModTime().Format("2006-01-02 15:04:05") + "\n"
@@ -198,8 +199,8 @@ func TestWholeLsf(t *testing.T) {
 	err = Lsf(f, buf)
 	require.NoError(t, err)
 
-	items, _ := fs.ListDirSorted(f, true, "")
-	itemsInSubdir, _ := fs.ListDirSorted(f, true, "subdir")
+	items, _ := list.DirSorted(f, true, "")
+	itemsInSubdir, _ := list.DirSorted(f, true, "subdir")
 	var expectedOutput []string
 	for _, item := range items {
 		expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05"))
diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go
index 30250e3e0..ed739ada9 100644
--- a/cmd/lsjson/lsjson.go
+++ b/cmd/lsjson/lsjson.go
@@ -10,6 +10,8 @@ import (
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/cmd/ls/lshelp"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -84,9 +86,9 @@ can be processed line by line as each item is written one to a line.
 		cmd.Run(false, false, command, func() error {
 			fmt.Println("[")
 			first := true
-			err := fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
+			err := walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
 				if err != nil {
-					fs.Stats.Error(err)
+					fs.CountError(err)
 					fs.Errorf(dirPath, "error listing: %v", err)
 					return nil
 				}
diff --git a/cmd/lsl/lsl.go b/cmd/lsl/lsl.go
index 4d284310c..83e17d16b 100644
--- a/cmd/lsl/lsl.go
+++ b/cmd/lsl/lsl.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/cmd/ls/lshelp"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -24,7 +24,7 @@ readable format with modification time, size and path. Recurses by default.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.ListLong(fsrc, os.Stdout)
+			return operations.ListLong(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/md5sum/md5sum.go b/cmd/md5sum/md5sum.go
index e18a2602b..6c69b3391 100644
--- a/cmd/md5sum/md5sum.go
+++ b/cmd/md5sum/md5sum.go
@@ -4,7 +4,7 @@ import (
 	"os"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -23,7 +23,7 @@ is in the same format as the standard md5sum tool produces.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.Md5sum(fsrc, os.Stdout)
+			return operations.Md5sum(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/memtest/memtest.go b/cmd/memtest/memtest.go
index 663060934..606ae28d8 100644
--- a/cmd/memtest/memtest.go
+++ b/cmd/memtest/memtest.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -21,7 +22,7 @@ var commandDefintion = &cobra.Command{
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			objects, _, err := fs.Count(fsrc)
+			objects, _, err := operations.Count(fsrc)
 			if err != nil {
 				return err
 			}
@@ -30,7 +31,7 @@ var commandDefintion = &cobra.Command{
 			runtime.GC()
 			runtime.ReadMemStats(&before)
 			var mu sync.Mutex
-			err = fs.ListFn(fsrc, func(o fs.Object) {
+			err = operations.ListFn(fsrc, func(o fs.Object) {
 				mu.Lock()
 				objs = append(objs, o)
 				mu.Unlock()
diff --git a/cmd/mkdir/mkdir.go b/cmd/mkdir/mkdir.go
index 155bd170b..0c55a4237 100644
--- a/cmd/mkdir/mkdir.go
+++ b/cmd/mkdir/mkdir.go
@@ -2,7 +2,7 @@ package mkdir
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -17,7 +17,7 @@ var commandDefintion = &cobra.Command{
 		cmd.CheckArgs(1, 1, command, args)
 		fdst := cmd.NewFsDst(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.Mkdir(fdst, "")
+			return operations.Mkdir(fdst, "")
 		})
 	},
 }
diff --git a/cmd/mount/dir.go b/cmd/mount/dir.go
index d0485f878..77086cba0 100644
--- a/cmd/mount/dir.go
+++ b/cmd/mount/dir.go
@@ -8,7 +8,7 @@ import (
 
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
@@ -24,7 +24,7 @@ var _ fusefs.Node = (*Dir)(nil)
 
 // Attr updates the attributes of a directory
 func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
-	defer fs.Trace(d, "")("attr=%+v, err=%v", a, &err)
+	defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
 	a.Gid = d.VFS().Opt.GID
 	a.Uid = d.VFS().Opt.UID
 	a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
@@ -43,7 +43,7 @@ var _ fusefs.NodeSetattrer = (*Dir)(nil)
 
 // Setattr handles attribute changes from FUSE. Currently supports ModTime only.
 func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
-	defer fs.Trace(d, "stat=%+v", req)("err=%v", &err)
+	defer log.Trace(d, "stat=%+v", req)("err=%v", &err)
 	if d.VFS().Opt.NoModTime {
 		return nil
 	}
@@ -67,7 +67,7 @@ var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
 //
 // Lookup need not handle the names "." and "..".
 func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
-	defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
+	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
 	mnode, err := d.Dir.Stat(req.Name)
 	if err != nil {
 		return nil, translateError(err)
@@ -87,7 +87,7 @@ var _ fusefs.HandleReadDirAller = (*Dir)(nil)
 // ReadDirAll reads the contents of the directory
 func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
 	itemsRead := -1
-	defer fs.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
+	defer log.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
 	items, err := d.Dir.ReadDirAll()
 	if err != nil {
 		return nil, translateError(err)
@@ -111,7 +111,7 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
 
 // Create makes a new file
 func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
-	defer fs.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
+	defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
 	file, err := d.Dir.Create(req.Name)
 	if err != nil {
 		return nil, nil, translateError(err)
@@ -127,7 +127,7 @@ var _ fusefs.NodeMkdirer = (*Dir)(nil)
 
 // Mkdir creates a new directory
 func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
-	defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
+	defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
 	dir, err := d.Dir.Mkdir(req.Name)
 	if err != nil {
 		return nil, translateError(err)
@@ -141,7 +141,7 @@ var _ fusefs.NodeRemover = (*Dir)(nil)
 // the receiver, which must be a directory.  The entry to be removed
 // may correspond to a file (unlink) or to a directory (rmdir).
 func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
-	defer fs.Trace(d, "name=%q", req.Name)("err=%v", &err)
+	defer log.Trace(d, "name=%q", req.Name)("err=%v", &err)
 	err = d.Dir.RemoveName(req.Name)
 	if err != nil {
 		return translateError(err)
@@ -154,7 +154,7 @@ var _ fusefs.NodeRenamer = (*Dir)(nil)
 
 // Rename the file
 func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
-	defer fs.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
+	defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
 	destDir, ok := newDir.(*Dir)
 	if !ok {
 		return errors.Errorf("Unknown Dir type %T", newDir)
@@ -173,7 +173,7 @@ var _ fusefs.NodeFsyncer = (*Dir)(nil)
 
 // Fsync the directory
 func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
-	defer fs.Trace(d, "")("err=%v", &err)
+	defer log.Trace(d, "")("err=%v", &err)
 	err = d.Dir.Sync()
 	if err != nil {
 		return translateError(err)
diff --git a/cmd/mount/file.go b/cmd/mount/file.go
index 94dfe369f..a625d4383 100644
--- a/cmd/mount/file.go
+++ b/cmd/mount/file.go
@@ -8,7 +8,7 @@ import (
 
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"golang.org/x/net/context"
 )
@@ -23,7 +23,7 @@ var _ fusefs.Node = (*File)(nil)
 
 // Attr fills out the attributes for the file
 func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
-	defer fs.Trace(f, "")("a=%+v, err=%v", a, &err)
+	defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
 	modTime := f.File.ModTime()
 	Size := uint64(f.File.Size())
 	Blocks := (Size + 511) / 512
@@ -44,7 +44,7 @@ var _ fusefs.NodeSetattrer = (*File)(nil)
 
 // Setattr handles attribute changes from FUSE. Currently supports ModTime and Size only
 func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
-	defer fs.Trace(f, "a=%+v", req)("err=%v", &err)
+	defer log.Trace(f, "a=%+v", req)("err=%v", &err)
 	if !f.VFS().Opt.NoModTime {
 		if req.Valid.MtimeNow() {
 			err = f.File.SetModTime(time.Now())
@@ -64,7 +64,7 @@ var _ fusefs.NodeOpener = (*File)(nil)
 
 // Open the file for read or write
 func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) {
-	defer fs.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)
+	defer log.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)
 
 	// fuse flags are based off syscall flags as are os flags, so
 	// should be compatible
@@ -91,6 +91,6 @@ var _ fusefs.NodeFsyncer = (*File)(nil)
 //
 // Note that we don't do anything except return OK
 func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
-	defer fs.Trace(f, "")("err=%v", &err)
+	defer log.Trace(f, "")("err=%v", &err)
 	return nil
 }
diff --git a/cmd/mount/fs.go b/cmd/mount/fs.go
index bf4a5bc5e..b892681d4 100644
--- a/cmd/mount/fs.go
+++ b/cmd/mount/fs.go
@@ -10,6 +10,7 @@ import (
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/pkg/errors"
@@ -36,7 +37,7 @@ func NewFS(f fs.Fs) *FS {
 
 // Root returns the root node
 func (f *FS) Root() (node fusefs.Node, err error) {
-	defer fs.Trace("", "")("node=%+v, err=%v", &node, &err)
+	defer log.Trace("", "")("node=%+v, err=%v", &node, &err)
 	root, err := f.VFS.Root()
 	if err != nil {
 		return nil, translateError(err)
@@ -50,7 +51,7 @@ var _ fusefs.FSStatfser = (*FS)(nil)
 // Statfs is called to obtain file system metadata.
 // It should write that data to resp.
 func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
-	defer fs.Trace("", "")("stat=%+v, err=%v", resp, &err)
+	defer log.Trace("", "")("stat=%+v, err=%v", resp, &err)
 	const blockSize = 4096
 	const fsBlocks = (1 << 50) / blockSize
 	resp.Blocks = fsBlocks  // Total data blocks in file system.
diff --git a/cmd/mount/handle.go b/cmd/mount/handle.go
index c37bf9400..c5c842729 100644
--- a/cmd/mount/handle.go
+++ b/cmd/mount/handle.go
@@ -7,7 +7,7 @@ import (
 
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"golang.org/x/net/context"
 )
@@ -23,7 +23,7 @@ var _ fusefs.HandleReader = (*FileHandle)(nil)
 // Read from the file handle
 func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
 	var n int
-	defer fs.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &n, &err)
+	defer log.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &n, &err)
 	data := make([]byte, req.Size)
 	n, err = fh.Handle.ReadAt(data, req.Offset)
 	if err == io.EOF {
@@ -40,7 +40,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
 
 // Write data to the file handle
 func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
-	defer fs.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
+	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
 	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
 	if err != nil {
 		return translateError(err)
@@ -68,7 +68,7 @@ var _ fusefs.HandleFlusher = (*FileHandle)(nil)
 // Filesystems shouldn't assume that flush will always be called after
 // some writes, or that it will be called at all.
 func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
-	defer fs.Trace(fh, "")("err=%v", &err)
+	defer log.Trace(fh, "")("err=%v", &err)
 	return translateError(fh.Handle.Flush())
 }
 
@@ -79,6 +79,6 @@ var _ fusefs.HandleReleaser = (*FileHandle)(nil)
 // It isn't called directly from userspace so the error is ignored by
 // the kernel
 func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
-	defer fs.Trace(fh, "")("err=%v", &err)
+	defer log.Trace(fh, "")("err=%v", &err)
 	return translateError(fh.Handle.Release())
 }
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index 65064af02..d95923901 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/pkg/errors"
@@ -181,21 +182,21 @@ will see all files and folders immediately in this mode.
 	cmd.Root.AddCommand(commandDefintion)
 
 	// Add flags
-	flags := commandDefintion.Flags()
-	fs.BoolVarP(flags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
+	flagSet := commandDefintion.Flags()
+	flags.BoolVarP(flagSet, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
 	// mount options
-	fs.BoolVarP(flags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
-	fs.BoolVarP(flags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
-	fs.BoolVarP(flags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
-	fs.BoolVarP(flags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
-	fs.BoolVarP(flags, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
-	fs.FlagsVarP(flags, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
-	fs.StringArrayVarP(flags, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
-	fs.StringArrayVarP(flags, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
-	//fs.BoolVarP(flags, &foreground, "foreground", "", foreground, "Do not detach.")
+	flags.BoolVarP(flagSet, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
+	flags.BoolVarP(flagSet, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
+	flags.BoolVarP(flagSet, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
+	flags.BoolVarP(flagSet, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
+	flags.BoolVarP(flagSet, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
+	flags.FVarP(flagSet, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
+	flags.StringArrayVarP(flagSet, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
+	flags.StringArrayVarP(flagSet, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
+	//flags.BoolVarP(flagSet, &foreground, "foreground", "", foreground, "Do not detach.")
 
 	// Add in the generic flags
-	vfsflags.AddFlags(flags)
+	vfsflags.AddFlags(flagSet)
 
 	return commandDefintion
 }
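
The local variable is renamed from `flags` to `flagSet` because this file now imports the `fs/config/flags` package: a local named `flags` would shadow the package and make calls like `flags.BoolVarP` unresolvable. A stripped-down illustration of the clash using the standard library's `flag` package (names hypothetical):

    package main

    import "flag"

    func main() {
    	// After this line the identifier "flag" is a local variable, so
    	// the flag package can no longer be referenced by name in this
    	// scope - the same clash mount.go avoids by using flagSet.
    	flag := flag.NewFlagSet("demo", flag.ContinueOnError)
    	_ = flag.Parse([]string{})
    }
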
diff --git a/cmd/mountlib/mounttest/fs.go b/cmd/mountlib/mounttest/fs.go
index 5b6585d9f..5b2bbfd31 100644
--- a/cmd/mountlib/mounttest/fs.go
+++ b/cmd/mountlib/mounttest/fs.go
@@ -19,6 +19,7 @@ import (
 
 	_ "github.com/ncw/rclone/backend/all" // import all the backends
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/fstest"
 	"github.com/ncw/rclone/vfs"
 	"github.com/stretchr/testify/assert"
@@ -268,7 +269,7 @@ func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
 
 // reads the remote tree into dir
 func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
-	objs, dirs, err := fs.WalkGetAll(r.fremote, filepath, true, 1)
+	objs, dirs, err := walk.GetAll(r.fremote, filepath, true, 1)
 	if err == fs.ErrorDirNotFound {
 		return
 	}
diff --git a/cmd/move/move.go b/cmd/move/move.go
index 936538d72..5559da866 100644
--- a/cmd/move/move.go
+++ b/cmd/move/move.go
@@ -2,7 +2,7 @@ package move
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/sync"
 	"github.com/spf13/cobra"
 )
 
@@ -44,7 +44,7 @@ If you want to delete empty source directories after move, use the --delete-empt
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(true, true, command, func() error {
 
-			return fs.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
+			return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
 		})
 	},
 }
diff --git a/cmd/moveto/moveto.go b/cmd/moveto/moveto.go
index 8e36cd734..c22d37645 100644
--- a/cmd/moveto/moveto.go
+++ b/cmd/moveto/moveto.go
@@ -2,7 +2,8 @@ package moveto
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/sync"
 	"github.com/spf13/cobra"
 )
 
@@ -49,9 +50,9 @@ transfer.
 
 		cmd.Run(true, true, command, func() error {
 			if srcFileName == "" {
-				return fs.MoveDir(fdst, fsrc, false)
+				return sync.MoveDir(fdst, fsrc, false)
 			}
-			return fs.MoveFile(fdst, fsrc, dstFileName, srcFileName)
+			return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
 		})
 	},
 }
diff --git a/cmd/ncdu/scan/scan.go b/cmd/ncdu/scan/scan.go
index 3b6883821..6f8e89248 100644
--- a/cmd/ncdu/scan/scan.go
+++ b/cmd/ncdu/scan/scan.go
@@ -6,6 +6,7 @@ import (
 	"sync"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 )
 
@@ -129,7 +130,7 @@ func Scan(f fs.Fs) (chan *Dir, chan error, chan struct{}) {
 	updated := make(chan struct{}, 1)
 	go func() {
 		parents := map[string]*Dir{}
-		err := fs.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
+		err := walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 			if err != nil {
 				return err // FIXME mark directory as errored instead of aborting
 			}
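
`fs.Walk` becomes `walk.Walk` with the same callback-per-directory contract, as the hunk above shows. A minimal sketch of the new call, assuming only the signature visible in the diff:

    package main

    import (
    	"log"

    	_ "github.com/ncw/rclone/backend/local" // register the local backend
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/walk"
    )

    func main() {
    	f, err := fs.NewFs("/tmp")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// One callback per directory; entries holds that directory's
    	// objects and subdirectories.
    	err = walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
    		if err != nil {
    			return err
    		}
    		log.Printf("%q: %d entries", dirPath, len(entries))
    		return nil
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
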
diff --git a/cmd/obscure/obscure.go b/cmd/obscure/obscure.go
index f754b8cd9..084c2c98d 100644
--- a/cmd/obscure/obscure.go
+++ b/cmd/obscure/obscure.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/spf13/cobra"
 )
 
@@ -18,7 +18,7 @@ var commandDefintion = &cobra.Command{
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
 		cmd.Run(false, false, command, func() error {
-			obscure := fs.MustObscure(args[0])
+			obscure := config.MustObscure(args[0])
 			fmt.Println(obscure)
 			return nil
 		})
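
`MustObscure` moves from `fs` to `fs/config`, along with `Obscure`, `Reveal` and `MustReveal` (their removal from `fs/config.go` appears further down). A round-trip sketch, assuming the functions keep the signatures shown in those hunks:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncw/rclone/fs/config"
    )

    func main() {
    	// Obscure AES-CTR encrypts and base64 encodes its input; the key
    	// is fixed, so this is obfuscation rather than real security.
    	obscured, err := config.Obscure("potato")
    	if err != nil {
    		log.Fatal(err)
    	}
    	revealed, err := config.Reveal(obscured)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(obscured, "->", revealed) // revealed == "potato"
    }
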
diff --git a/cmd/purge/purge.go b/cmd/purge/purge.go
index e4c6a9766..68dce5107 100644
--- a/cmd/purge/purge.go
+++ b/cmd/purge/purge.go
@@ -2,7 +2,7 @@ package purge
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -22,7 +22,7 @@ you want to selectively delete files.
 		cmd.CheckArgs(1, 1, command, args)
 		fdst := cmd.NewFsDst(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.Purge(fdst)
+			return operations.Purge(fdst)
 		})
 	},
 }
diff --git a/cmd/rcat/rcat.go b/cmd/rcat/rcat.go
index 3552b1f55..7dfa449b4 100644
--- a/cmd/rcat/rcat.go
+++ b/cmd/rcat/rcat.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -50,7 +50,7 @@ a lot of data, you're better off caching locally and then
 
 		fdst, dstFileName := cmd.NewFsDstFile(args)
 		cmd.Run(false, false, command, func() error {
-			_, err := fs.Rcat(fdst, dstFileName, os.Stdin, time.Now())
+			_, err := operations.Rcat(fdst, dstFileName, os.Stdin, time.Now())
 			return err
 		})
 	},
diff --git a/cmd/rmdir/rmdir.go b/cmd/rmdir/rmdir.go
index ae38367c4..9b10c0696 100644
--- a/cmd/rmdir/rmdir.go
+++ b/cmd/rmdir/rmdir.go
@@ -2,7 +2,7 @@ package rmdir
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -20,7 +20,7 @@ objects in it, use purge for that.`,
 		cmd.CheckArgs(1, 1, command, args)
 		fdst := cmd.NewFsDst(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.Rmdir(fdst, "")
+			return operations.Rmdir(fdst, "")
 		})
 	},
 }
diff --git a/cmd/rmdirs/rmdirs.go b/cmd/rmdirs/rmdirs.go
index 735ef1769..354920b14 100644
--- a/cmd/rmdirs/rmdirs.go
+++ b/cmd/rmdirs/rmdirs.go
@@ -2,7 +2,7 @@ package rmdir
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -32,7 +32,7 @@ empty directories in.
 		cmd.CheckArgs(1, 1, command, args)
 		fdst := cmd.NewFsDst(args)
 		cmd.Run(true, false, command, func() error {
-			return fs.Rmdirs(fdst, "", leaveRoot)
+			return operations.Rmdirs(fdst, "", leaveRoot)
 		})
 	},
 }
diff --git a/cmd/serve/http/http.go b/cmd/serve/http/http.go
index 3c4b79284..39b34f86b 100644
--- a/cmd/serve/http/http.go
+++ b/cmd/serve/http/http.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
@@ -159,7 +160,7 @@ type indexData struct {
 
 // error returns an http.StatusInternalServerError and logs the error
 func internalError(what interface{}, w http.ResponseWriter, text string, err error) {
-	fs.Stats.Error(err)
+	fs.CountError(err)
 	fs.Errorf(what, "%s: %v", text, err)
 	http.Error(w, text+".", http.StatusInternalServerError)
 }
@@ -192,8 +193,8 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
 	}
 
 	// Account the transfer
-	fs.Stats.Transferring(dirRemote)
-	defer fs.Stats.DoneTransferring(dirRemote, true)
+	accounting.Stats.Transferring(dirRemote)
+	defer accounting.Stats.DoneTransferring(dirRemote, true)
 
 	fs.Infof(dirRemote, "%s: Serving directory", r.RemoteAddr)
 	err = indexTemplate.Execute(w, indexData{
@@ -259,8 +260,8 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 	}()
 
 	// Account the transfer
-	fs.Stats.Transferring(remote)
-	defer fs.Stats.DoneTransferring(remote, true)
+	accounting.Stats.Transferring(remote)
+	defer accounting.Stats.DoneTransferring(remote, true)
 	// FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer
 
 	// Serve the file
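
The global stats object moves to `fs/accounting`, but the pairing stays the same: mark the transfer started, then mark it done in a deferred call. A sketch of accounting a single hypothetical transfer with the calls used above:

    package main

    import "github.com/ncw/rclone/fs/accounting"

    func serveOne(remote string) (err error) {
    	// Mark the transfer as in progress...
    	accounting.Stats.Transferring(remote)
    	// ...and as finished (successfully or not) when we return.
    	defer func() {
    		accounting.Stats.DoneTransferring(remote, err == nil)
    	}()
    	// ... copy the data ...
    	return nil
    }

    func main() {
    	_ = serveOne("path/to/file")
    }
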
diff --git a/cmd/serve/http/http_test.go b/cmd/serve/http/http_test.go
index aefb8e84d..5647347b7 100644
--- a/cmd/serve/http/http_test.go
+++ b/cmd/serve/http/http_test.go
@@ -14,6 +14,8 @@ import (
 
 	_ "github.com/ncw/rclone/backend/local"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/filter"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -47,14 +49,14 @@ func startServer(t *testing.T, f fs.Fs) {
 
 func TestInit(t *testing.T) {
 	// Configure the remote
-	fs.LoadConfig()
+	config.LoadConfig()
 	// fs.Config.LogLevel = fs.LogLevelDebug
 	// fs.Config.DumpHeaders = true
 	// fs.Config.DumpBodies = true
 
 	// exclude files called hidden.txt and directories called hidden
-	require.NoError(t, fs.Config.Filter.AddRule("- hidden.txt"))
-	require.NoError(t, fs.Config.Filter.AddRule("- hidden/**"))
+	require.NoError(t, filter.Active.AddRule("- hidden.txt"))
+	require.NoError(t, filter.Active.AddRule("- hidden/**"))
 
 	// Create a test Fs
 	f, err := fs.NewFs("testdata/files")
diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go
index 61ec8e0a8..5f9945814 100644
--- a/cmd/serve/webdav/webdav.go
+++ b/cmd/serve/webdav/webdav.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/spf13/cobra"
@@ -96,7 +97,7 @@ func (w *WebDAV) logRequest(r *http.Request, err error) {
 
 // Mkdir creates a directory
 func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) {
-	defer fs.Trace(name, "perm=%v", perm)("err = %v", &err)
+	defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
 	dir, leaf, err := w.vfs.StatParent(name)
 	if err != nil {
 		return err
@@ -107,13 +108,13 @@ func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err
 
 // OpenFile opens a file or a directory
 func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
-	defer fs.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
+	defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
 	return w.vfs.OpenFile(name, flags, perm)
 }
 
 // RemoveAll removes a file or a directory and its contents
 func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
-	defer fs.Trace(name, "")("err = %v", &err)
+	defer log.Trace(name, "")("err = %v", &err)
 	node, err := w.vfs.Stat(name)
 	if err != nil {
 		return err
@@ -127,13 +128,13 @@ func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
 
 // Rename a file or a directory
 func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) {
-	defer fs.Trace(oldName, "newName=%q", newName)("err = %v", &err)
+	defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
 	return w.vfs.Rename(oldName, newName)
 }
 
 // Stat returns info about the file or directory
 func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) {
-	defer fs.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
+	defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
 	return w.vfs.Stat(name)
 }
 
diff --git a/cmd/sha1sum/sha1sum.go b/cmd/sha1sum/sha1sum.go
index ec181fd8b..6bb048ce3 100644
--- a/cmd/sha1sum/sha1sum.go
+++ b/cmd/sha1sum/sha1sum.go
@@ -4,7 +4,7 @@ import (
 	"os"
 
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -23,7 +23,7 @@ is in the same format as the standard sha1sum tool produces.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			return fs.Sha1sum(fsrc, os.Stdout)
+			return operations.Sha1sum(fsrc, os.Stdout)
 		})
 	},
 }
diff --git a/cmd/size/size.go b/cmd/size/size.go
index 1d70a3fd0..65dc0793d 100644
--- a/cmd/size/size.go
+++ b/cmd/size/size.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
@@ -19,7 +20,7 @@ var commandDefintion = &cobra.Command{
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
-			objects, size, err := fs.Count(fsrc)
+			objects, size, err := operations.Count(fsrc)
 			if err != nil {
 				return err
 			}
diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go
index fb12e502d..3f5fd5686 100644
--- a/cmd/sync/sync.go
+++ b/cmd/sync/sync.go
@@ -2,7 +2,7 @@ package sync
 
 import (
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/sync"
 	"github.com/spf13/cobra"
 )
 
@@ -37,7 +37,7 @@ go there.
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(true, true, command, func() error {
-			return fs.Sync(fdst, fsrc)
+			return sync.Sync(fdst, fsrc)
 		})
 	},
 }
diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go
index d3b455c97..ce7c45f27 100644
--- a/cmd/touch/touch.go
+++ b/cmd/touch/touch.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/object"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -55,7 +56,7 @@ func Touch(fsrc fs.Fs, srcFileName string) error {
 	if err != nil {
 		if !notCreateNewFile {
 			var buffer []byte
-			src := fs.NewStaticObjectInfo(srcFileName, timeAtr, int64(len(buffer)), true, nil, fsrc)
+			src := object.NewStaticObjectInfo(srcFileName, timeAtr, int64(len(buffer)), true, nil, fsrc)
 			_, err = fsrc.Put(bytes.NewBuffer(buffer), src)
 			if err != nil {
 				return err
diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go
index 7927ba97e..86fdefd1e 100644
--- a/cmd/tree/tree.go
+++ b/cmd/tree/tree.go
@@ -3,7 +3,6 @@ package tree
 import (
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -13,6 +12,8 @@ import (
 	"github.com/a8m/tree"
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -88,7 +89,7 @@ The tree command has many options for controlling the listing which
 are compatible with the unix tree command.  Note that not all of them have
 short options as they conflict with rclone's short options.
 `,
-	Run: func(command *cobra.Command, args []string) {
+	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		outFile := os.Stdout
@@ -96,7 +97,7 @@ short options as they conflict with rclone's short options.
 			var err error
 			outFile, err = os.Create(outFileName)
 			if err != nil {
-				log.Fatalf("Failed to create output file: %v", err)
+				return errors.Errorf("failed to create output file: %v", err)
 			}
 		}
 		opts.VerSort = opts.VerSort || sort == "version"
@@ -110,12 +111,13 @@ short options as they conflict with rclone's short options.
 		cmd.Run(false, false, command, func() error {
 			return Tree(fsrc, outFile, &opts)
 		})
+		return nil
 	},
 }
 
 // Tree lists fsrc to outFile using the Options passed in
 func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error {
-	dirs, err := fs.NewDirTree(fsrc, "", false, opts.DeepLevel)
+	dirs, err := walk.NewDirTree(fsrc, "", false, opts.DeepLevel)
 	if err != nil {
 		return err
 	}
@@ -183,22 +185,22 @@ func (to *FileInfo) String() string {
 }
 
 // Fs maps an fs.Fs into a tree.Fs
-type Fs fs.DirTree
+type Fs walk.DirTree
 
 // NewFs creates a new tree
-func NewFs(dirs fs.DirTree) Fs {
+func NewFs(dirs walk.DirTree) Fs {
 	return Fs(dirs)
 }
 
 // Stat returns info about the file
 func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
-	defer fs.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
+	defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
 	filePath = filepath.ToSlash(filePath)
 	filePath = strings.TrimLeft(filePath, "/")
 	if filePath == "" {
 		return &FileInfo{fs.NewDir("", time.Now())}, nil
 	}
-	_, entry := fs.DirTree(dirs).Find(filePath)
+	_, entry := walk.DirTree(dirs).Find(filePath)
 	if entry == nil {
 		return nil, errors.Errorf("Couldn't find %q in directory cache", filePath)
 	}
@@ -207,7 +209,7 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
 
 // ReadDir returns info about the directory and fills up the directory cache
 func (dirs Fs) ReadDir(dir string) (names []string, err error) {
-	defer fs.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
+	defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
 	dir = filepath.ToSlash(dir)
 	dir = strings.TrimLeft(dir, "/")
 	entries, ok := dirs[dir]
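
Switching from `Run` to `RunE` lets the tree command hand the error back to cobra instead of calling `log.Fatalf`, which would exit the process before deferred cleanup runs. A minimal sketch of the pattern (command name and flags hypothetical):

    package main

    import (
    	"os"

    	"github.com/pkg/errors"
    	"github.com/spf13/cobra"
    )

    var demo = &cobra.Command{
    	Use:  "demo <output-file>",
    	Args: cobra.ExactArgs(1),
    	// RunE returns the error to cobra, which prints it and exits
    	// non-zero.
    	RunE: func(command *cobra.Command, args []string) error {
    		out, err := os.Create(args[0])
    		if err != nil {
    			return errors.Errorf("failed to create output file: %v", err)
    		}
    		return out.Close()
    	},
    }

    func main() {
    	if demo.Execute() != nil {
    		os.Exit(1)
    	}
    }
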
diff --git a/fs/accounting.go b/fs/accounting/accounting.go
similarity index 84%
rename from fs/accounting.go
rename to fs/accounting/accounting.go
index a80da7ec0..2f9e525fa 100644
--- a/fs/accounting.go
+++ b/fs/accounting/accounting.go
@@ -1,6 +1,5 @@
-// Accounting and limiting reader
-
-package fs
+// Package accounting provides an accounting and limiting reader
+package accounting
 
 import (
 	"bytes"
@@ -12,6 +11,8 @@ import (
 	"time"
 
 	"github.com/VividCortex/ewma"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/asyncreader"
 	"golang.org/x/net/context" // switch to "context" when we stop supporting go1.6
 	"golang.org/x/time/rate"
 )
@@ -24,31 +25,36 @@ var (
 	prevTokenBucket   = tokenBucket
 	bwLimitToggledOff = false
 	currLimitMu       sync.Mutex // protects changes to the timeslot
-	currLimit         BwTimeSlot
+	currLimit         fs.BwTimeSlot
 )
 
+func init() {
+	// Set the function pointer up in fs
+	fs.CountError = Stats.Error
+}
+
 const maxBurstSize = 1 * 1024 * 1024 // must be bigger than the biggest request
 
 // make a new empty token bucket with the bandwidth given
-func newTokenBucket(bandwidth SizeSuffix) *rate.Limiter {
+func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
 	newTokenBucket := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
 	// empty the bucket
 	err := newTokenBucket.WaitN(context.Background(), maxBurstSize)
 	if err != nil {
-		Errorf(nil, "Failed to empty token bucket: %v", err)
+		fs.Errorf(nil, "Failed to empty token bucket: %v", err)
 	}
 	return newTokenBucket
 }
 
-// Start the token bucket if necessary
-func startTokenBucket() {
+// StartTokenBucket starts the token bucket if necessary
+func StartTokenBucket() {
 	currLimitMu.Lock()
-	currLimit := bwLimit.LimitAt(time.Now())
+	currLimit := fs.Config.BwLimit.LimitAt(time.Now())
 	currLimitMu.Unlock()
 
-	if currLimit.bandwidth > 0 {
-		tokenBucket = newTokenBucket(currLimit.bandwidth)
-		Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
+	if currLimit.Bandwidth > 0 {
+		tokenBucket = newTokenBucket(currLimit.Bandwidth)
+		fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.Bandwidth)
 
 		// Start the SIGUSR2 signal handler to toggle bandwidth.
 		// This function does nothing in windows systems.
@@ -56,21 +62,21 @@ func startTokenBucket() {
 	}
 }
 
-// startTokenTicker creates a ticker to update the bandwidth limiter every minute.
-func startTokenTicker() {
+// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
+func StartTokenTicker() {
 	// If the timetable has a single entry or was not specified, we don't need
 	// a ticker to update the bandwidth.
-	if len(bwLimit) <= 1 {
+	if len(fs.Config.BwLimit) <= 1 {
 		return
 	}
 
 	ticker := time.NewTicker(time.Minute)
 	go func() {
 		for range ticker.C {
-			limitNow := bwLimit.LimitAt(time.Now())
+			limitNow := fs.Config.BwLimit.LimitAt(time.Now())
 			currLimitMu.Lock()
 
-			if currLimit.bandwidth != limitNow.bandwidth {
+			if currLimit.Bandwidth != limitNow.Bandwidth {
 				tokenBucketMu.Lock()
 
 				// If bwlimit is toggled off, the change should only
@@ -84,17 +90,17 @@ func startTokenTicker() {
 				}
 
 				// Set new bandwidth. If unlimited, set tokenbucket to nil.
-				if limitNow.bandwidth > 0 {
-					*targetBucket = newTokenBucket(limitNow.bandwidth)
+				if limitNow.Bandwidth > 0 {
+					*targetBucket = newTokenBucket(limitNow.Bandwidth)
 					if bwLimitToggledOff {
-						Logf(nil, "Scheduled bandwidth change. "+
-							"Limit will be set to %vBytes/s when toggled on again.", &limitNow.bandwidth)
+						fs.Logf(nil, "Scheduled bandwidth change. "+
+							"Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth)
 					} else {
-						Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.bandwidth)
+						fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth)
 					}
 				} else {
 					*targetBucket = nil
-					Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
+					fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
 				}
 
 				currLimit = limitNow
@@ -117,7 +123,7 @@ type inProgress struct {
 // newInProgress makes a new inProgress object
 func newInProgress() *inProgress {
 	return &inProgress{
-		m: make(map[string]*Account, Config.Transfers),
+		m: make(map[string]*Account, fs.Config.Transfers),
 	}
 }
 
@@ -181,8 +187,8 @@ type StatsInfo struct {
 // NewStats creates an initialised StatsInfo
 func NewStats() *StatsInfo {
 	return &StatsInfo{
-		checking:     make(stringSet, Config.Checkers),
-		transferring: make(stringSet, Config.Transfers),
+		checking:     make(stringSet, fs.Config.Checkers),
+		transferring: make(stringSet, fs.Config.Transfers),
 		start:        time.Now(),
 		inProgress:   newInProgress(),
 	}
@@ -201,7 +207,7 @@ func (s *StatsInfo) String() string {
 	dtRounded := dt - (dt % (time.Second / 10))
 	buf := &bytes.Buffer{}
 
-	if Config.DataRateUnit == "bits" {
+	if fs.Config.DataRateUnit == "bits" {
 		speed = speed * 8
 	}
 
@@ -212,7 +218,7 @@ Checks:        %10d
 Transferred:   %10d
 Elapsed time:  %10v
 `,
-		SizeSuffix(s.bytes).Unit("Bytes"), SizeSuffix(speed).Unit(strings.Title(Config.DataRateUnit)+"/s"),
+		fs.SizeSuffix(s.bytes).Unit("Bytes"), fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
 		s.errors,
 		s.checks,
 		s.transfers,
@@ -228,7 +234,7 @@ Elapsed time:  %10v
 
 // Log outputs the StatsInfo to the log
 func (s *StatsInfo) Log() {
-	LogLevelPrintf(Config.StatsLogLevel, nil, "%v\n", s)
+	fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
 }
 
 // Bytes updates the stats for bytes bytes
@@ -375,7 +381,7 @@ func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account {
 }
 
 // NewAccount makes an Account reader for an object
-func NewAccount(in io.ReadCloser, obj Object) *Account {
+func NewAccount(in io.ReadCloser, obj fs.Object) *Account {
 	return NewAccountSizeName(in, obj.Size(), obj.Remote())
 }
 
@@ -383,16 +389,16 @@ func NewAccount(in io.ReadCloser, obj Object) *Account {
 func (acc *Account) WithBuffer() *Account {
 	acc.withBuf = true
 	var buffers int
-	if acc.size >= int64(Config.BufferSize) || acc.size == -1 {
-		buffers = int(int64(Config.BufferSize) / asyncBufferSize)
+	if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
+		buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
 	} else {
-		buffers = int(acc.size / asyncBufferSize)
+		buffers = int(acc.size / asyncreader.BufferSize)
 	}
 	// On big files add a buffer
 	if buffers > 0 {
-		in, err := newAsyncReader(acc.in, buffers)
+		in, err := asyncreader.New(acc.in, buffers)
 		if err != nil {
-			Errorf(acc.name, "Failed to make buffer: %v", err)
+			fs.Errorf(acc.name, "Failed to make buffer: %v", err)
 		} else {
 			acc.in = in
 		}
@@ -409,7 +415,7 @@ func (acc *Account) GetReader() io.ReadCloser {
 
 // StopBuffering stops the async buffer doing any more buffering
 func (acc *Account) StopBuffering() {
-	if asyncIn, ok := acc.in.(*asyncReader); ok {
+	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
 		asyncIn.Abandon()
 	}
 }
@@ -484,7 +490,7 @@ func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
 	if tokenBucket != nil {
 		tbErr := tokenBucket.WaitN(context.Background(), n)
 		if tbErr != nil {
-			Errorf(nil, "Token bucket error: %v", err)
+			fs.Errorf(nil, "Token bucket error: %v", err)
 		}
 	}
 	tokenBucketMu.Unlock()
@@ -572,14 +578,14 @@ func (acc *Account) String() string {
 		}
 	}
 	name := []rune(acc.name)
-	if Config.StatsFileNameLength > 0 {
-		if len(name) > Config.StatsFileNameLength {
-			where := len(name) - Config.StatsFileNameLength
+	if fs.Config.StatsFileNameLength > 0 {
+		if len(name) > fs.Config.StatsFileNameLength {
+			where := len(name) - fs.Config.StatsFileNameLength
 			name = append([]rune{'.', '.', '.'}, name[where:]...)
 		}
 	}
 
-	if Config.DataRateUnit == "bits" {
+	if fs.Config.DataRateUnit == "bits" {
 		cur = cur * 8
 	}
 
@@ -588,12 +594,12 @@ func (acc *Account) String() string {
 		percentageDone = int(100 * float64(a) / float64(b))
 	}
 
-	done := fmt.Sprintf("%2d%% /%s", percentageDone, SizeSuffix(b))
+	done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
 
 	return fmt.Sprintf("%45s: %s, %s/s, %s",
 		string(name),
 		done,
-		SizeSuffix(cur),
+		fs.SizeSuffix(cur),
 		etas,
 	)
 }
@@ -633,10 +639,10 @@ func (a *accountStream) Read(p []byte) (n int, err error) {
 // AccountByPart turns off whole file accounting
 //
 // Returns the current account or nil if not found
-func AccountByPart(obj Object) *Account {
+func AccountByPart(obj fs.Object) *Account {
 	acc := Stats.inProgress.get(obj.Remote())
 	if acc == nil {
-		Debugf(obj, "Didn't find object to account part transfer")
+		fs.Debugf(obj, "Didn't find object to account part transfer")
 		return nil
 	}
 	acc.disableWholeFileAccounting()
@@ -647,7 +653,7 @@ func AccountByPart(obj Object) *Account {
 //
 // It disables the whole file counter and returns an io.Reader to wrap
 // a segment of the transfer.
-func AccountPart(obj Object, in io.Reader) io.Reader {
+func AccountPart(obj fs.Object, in io.Reader) io.Reader {
 	acc := AccountByPart(obj)
 	if acc == nil {
 		return in
diff --git a/fs/accounting_other.go b/fs/accounting/accounting_other.go
similarity index 93%
rename from fs/accounting_other.go
rename to fs/accounting/accounting_other.go
index 48fb37411..9a9cb2975 100644
--- a/fs/accounting_other.go
+++ b/fs/accounting/accounting_other.go
@@ -3,7 +3,7 @@
 
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
 
-package fs
+package accounting
 
 // startSignalHandler() is Unix specific and does nothing under non-Unix
 // platforms.
diff --git a/fs/accounting_unix.go b/fs/accounting/accounting_unix.go
similarity index 87%
rename from fs/accounting_unix.go
rename to fs/accounting/accounting_unix.go
index 5ca04b3ac..20d659d3d 100644
--- a/fs/accounting_unix.go
+++ b/fs/accounting/accounting_unix.go
@@ -3,12 +3,14 @@
 
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris
 
-package fs
+package accounting
 
 import (
 	"os"
 	"os/signal"
 	"syscall"
+
+	"github.com/ncw/rclone/fs"
 )
 
 // startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
@@ -28,7 +30,7 @@ func startSignalHandler() {
 				s = "enabled"
 			}
 			tokenBucketMu.Unlock()
-			Logf(nil, "Bandwidth limit %s by user", s)
+			fs.Logf(nil, "Bandwidth limit %s by user", s)
 		}
 	}()
 }
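
The SIGUSR2 handler itself is unchanged apart from logging via `fs.Logf`. For reference, the general shape of such a toggle handler, as a stand-alone sketch (Unix only, names hypothetical):

    // +build darwin dragonfly freebsd linux netbsd openbsd solaris

    package main

    import (
    	"log"
    	"os"
    	"os/signal"
    	"syscall"
    )

    func main() {
    	toggle := make(chan os.Signal, 1)
    	signal.Notify(toggle, syscall.SIGUSR2)
    	enabled := true
    	for range toggle { // send SIGUSR2 to this process to toggle
    		enabled = !enabled
    		log.Printf("Bandwidth limit enabled=%v", enabled)
    	}
    }
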
diff --git a/fs/buffer.go b/fs/asyncreader/asyncreader.go
similarity index 82%
rename from fs/buffer.go
rename to fs/asyncreader/asyncreader.go
index 588a36d00..e19a4766d 100644
--- a/fs/buffer.go
+++ b/fs/asyncreader/asyncreader.go
@@ -1,14 +1,18 @@
-package fs
+// Package asyncreader provides an asynchronous reader which reads
+// independently of write
+package asyncreader
 
 import (
 	"io"
 	"sync"
 
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 )
 
 const (
-	asyncBufferSize  = 1024 * 1024
+	// BufferSize is the default size of the async buffer
+	BufferSize       = 1024 * 1024
 	softStartInitial = 4 * 1024
 )
 
@@ -18,11 +22,11 @@ var asyncBufferPool = sync.Pool{
 
 var errorStreamAbandoned = errors.New("stream abandoned")
 
-// asyncReader will do async read-ahead from the input reader
+// AsyncReader will do async read-ahead from the input reader
 // and make the data available as an io.Reader.
 // This should be fully transparent, except that once an error
 // has been returned from the Reader, it will not recover.
-type asyncReader struct {
+type AsyncReader struct {
 	in      io.ReadCloser // Input reader
 	ready   chan *buffer  // Buffers ready to be handed to the reader
 	token   chan struct{} // Tokens which allow a buffer to be taken
@@ -36,25 +40,25 @@ type asyncReader struct {
 	mu      sync.Mutex    // lock for Read/WriteTo/Abandon/Close
 }
 
-// newAsyncReader returns a reader that will asynchronously read from
-// the supplied Reader into a number of buffers each of size asyncBufferSize
+// New returns a reader that will asynchronously read from
+// the supplied Reader into a number of buffers each of size BufferSize
 // It will start reading from the input at once, maybe even before this
 // function has returned.
 // The input can be read from the returned reader.
 // When done use Close to release the buffers and close the supplied input.
-func newAsyncReader(rd io.ReadCloser, buffers int) (*asyncReader, error) {
+func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
 	if buffers <= 0 {
 		return nil, errors.New("number of buffers too small")
 	}
 	if rd == nil {
 		return nil, errors.New("nil reader supplied")
 	}
-	a := &asyncReader{}
+	a := &AsyncReader{}
 	a.init(rd, buffers)
 	return a, nil
 }
 
-func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
+func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
 	a.in = rd
 	a.ready = make(chan *buffer, buffers)
 	a.token = make(chan struct{}, buffers)
@@ -78,7 +82,7 @@ func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
 			select {
 			case <-a.token:
 				b := a.getBuffer()
-				if a.size < asyncBufferSize {
+				if a.size < BufferSize {
 					b.buf = b.buf[:a.size]
 					a.size <<= 1
 				}
@@ -95,19 +99,19 @@ func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
 }
 
 // return the buffer to the pool (clearing it)
-func (a *asyncReader) putBuffer(b *buffer) {
+func (a *AsyncReader) putBuffer(b *buffer) {
 	b.clear()
 	asyncBufferPool.Put(b)
 }
 
 // get a buffer from the pool
-func (a *asyncReader) getBuffer() *buffer {
+func (a *AsyncReader) getBuffer() *buffer {
 	b := asyncBufferPool.Get().(*buffer)
 	return b
 }
 
 // Read will return the next available data.
-func (a *asyncReader) fill() (err error) {
+func (a *AsyncReader) fill() (err error) {
 	if a.cur.isEmpty() {
 		if a.cur != nil {
 			a.putBuffer(a.cur)
@@ -128,7 +132,7 @@ func (a *asyncReader) fill() (err error) {
 }
 
 // Read will return the next available data.
-func (a *asyncReader) Read(p []byte) (n int, err error) {
+func (a *AsyncReader) Read(p []byte) (n int, err error) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
 
@@ -153,7 +157,7 @@ func (a *asyncReader) Read(p []byte) (n int, err error) {
 // WriteTo writes data to w until there's no more data to write or when an error occurs.
 // The return value n is the number of bytes written.
 // Any error encountered during the write is also returned.
-func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
+func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
 
@@ -177,8 +181,8 @@ func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
 }
 
 // Abandon will ensure that the underlying async reader is shut down.
-// It will NOT close the input supplied on newAsyncReader.
-func (a *asyncReader) Abandon() {
+// It will NOT close the input supplied on New.
+func (a *AsyncReader) Abandon() {
 	select {
 	case <-a.exit:
 		// Do nothing if reader routine already exited
@@ -202,8 +206,8 @@ func (a *asyncReader) Abandon() {
 }
 
 // Close will ensure that the underlying async reader is shut down.
-// It will also close the input supplied on newAsyncReader.
-func (a *asyncReader) Close() (err error) {
+// It will also close the input supplied on New.
+func (a *AsyncReader) Close() (err error) {
 	a.Abandon()
 	if a.closed {
 		return nil
@@ -223,7 +227,7 @@ type buffer struct {
 
 func newBuffer() *buffer {
 	return &buffer{
-		buf: make([]byte, asyncBufferSize),
+		buf: make([]byte, BufferSize),
 		err: nil,
 	}
 }
@@ -252,7 +256,7 @@ func (b *buffer) isEmpty() bool {
 // Any error encountered during the read is returned.
 func (b *buffer) read(rd io.Reader) error {
 	var n int
-	n, b.err = ReadFill(rd, b.buf)
+	n, b.err = readers.ReadFill(rd, b.buf)
 	b.buf = b.buf[0:n]
 	b.offset = 0
 	return b.err
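
With the rename the read-ahead buffer becomes a public package: `newAsyncReader` is now `asyncreader.New` and the type is exported. A short usage sketch based only on the signatures visible above:

    package main

    import (
    	"io/ioutil"
    	"log"
    	"strings"

    	"github.com/ncw/rclone/fs/asyncreader"
    )

    func main() {
    	in := ioutil.NopCloser(strings.NewReader("data to read ahead"))
    	// Read ahead into 4 buffers of up to asyncreader.BufferSize each.
    	ar, err := asyncreader.New(in, 4)
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := ioutil.ReadAll(ar)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("read %d bytes", len(data))
    	// Close also closes the underlying input.
    	if err := ar.Close(); err != nil {
    		log.Fatal(err)
    	}
    }
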
diff --git a/fs/buffer_test.go b/fs/asyncreader/asyncreader_test.go
similarity index 92%
rename from fs/buffer_test.go
rename to fs/asyncreader/asyncreader_test.go
index 936ba39fa..2f9648f91 100644
--- a/fs/buffer_test.go
+++ b/fs/asyncreader/asyncreader_test.go
@@ -1,4 +1,4 @@
-package fs
+package asyncreader
 
 import (
 	"bufio"
@@ -17,7 +17,7 @@ import (
 
 func TestAsyncReader(t *testing.T) {
 	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
-	ar, err := newAsyncReader(buf, 4)
+	ar, err := New(buf, 4)
 	require.NoError(t, err)
 
 	var dst = make([]byte, 100)
@@ -42,7 +42,7 @@ func TestAsyncReader(t *testing.T) {
 
 	// Test Close without reading everything
 	buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
-	ar, err = newAsyncReader(buf, 4)
+	ar, err = New(buf, 4)
 	require.NoError(t, err)
 	err = ar.Close()
 	require.NoError(t, err)
@@ -51,7 +51,7 @@ func TestAsyncReader(t *testing.T) {
 
 func TestAsyncWriteTo(t *testing.T) {
 	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
-	ar, err := newAsyncReader(buf, 4)
+	ar, err := New(buf, 4)
 	require.NoError(t, err)
 
 	var dst = &bytes.Buffer{}
@@ -70,14 +70,14 @@ func TestAsyncWriteTo(t *testing.T) {
 
 func TestAsyncReaderErrors(t *testing.T) {
 	// test nil reader
-	_, err := newAsyncReader(nil, 4)
+	_, err := New(nil, 4)
 	require.Error(t, err)
 
 	// invalid buffer number
 	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
-	_, err = newAsyncReader(buf, 0)
+	_, err = New(buf, 0)
 	require.Error(t, err)
-	_, err = newAsyncReader(buf, -1)
+	_, err = New(buf, -1)
 	require.Error(t, err)
 }
 
@@ -157,9 +157,9 @@ func TestAsyncReaderSizes(t *testing.T) {
 						bufsize := bufsizes[k]
 						read := readmaker.fn(strings.NewReader(text))
 						buf := bufio.NewReaderSize(read, bufsize)
-						ar, _ := newAsyncReader(ioutil.NopCloser(buf), l)
+						ar, _ := New(ioutil.NopCloser(buf), l)
 						s := bufreader.fn(ar)
-						// "timeout" expects the Reader to recover, asyncReader does not.
+						// "timeout" expects the Reader to recover, AsyncReader does not.
 						if s != text && readmaker.name != "timeout" {
 							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
 								readmaker.name, bufreader.name, bufsize, text, s)
@@ -196,14 +196,14 @@ func TestAsyncReaderWriteTo(t *testing.T) {
 						bufsize := bufsizes[k]
 						read := readmaker.fn(strings.NewReader(text))
 						buf := bufio.NewReaderSize(read, bufsize)
-						ar, _ := newAsyncReader(ioutil.NopCloser(buf), l)
+						ar, _ := New(ioutil.NopCloser(buf), l)
 						dst := &bytes.Buffer{}
 						_, err := ar.WriteTo(dst)
 						if err != nil && err != io.EOF && err != iotest.ErrTimeout {
 							t.Fatal("Copy:", err)
 						}
 						s := dst.String()
-						// "timeout" expects the Reader to recover, asyncReader does not.
+						// "timeout" expects the Reader to recover, AsyncReader does not.
 						if s != text && readmaker.name != "timeout" {
 							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
 								readmaker.name, bufreader.name, bufsize, text, s)
@@ -243,7 +243,7 @@ func (z *zeroReader) Close() error {
 // Test closing and abandoning
 func testAsyncReaderClose(t *testing.T, writeto bool) {
 	zr := &zeroReader{}
-	a, err := newAsyncReader(zr, 16)
+	a, err := New(zr, 16)
 	require.NoError(t, err)
 	var copyN int64
 	var copyErr error
diff --git a/fs/bwtimetable.go b/fs/bwtimetable.go
new file mode 100644
index 000000000..558bc9da1
--- /dev/null
+++ b/fs/bwtimetable.go
@@ -0,0 +1,132 @@
+package fs
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// BwTimeSlot represents a bandwidth configuration at a point in time.
+type BwTimeSlot struct {
+	HHMM      int
+	Bandwidth SizeSuffix
+}
+
+// BwTimetable contains all configured time slots.
+type BwTimetable []BwTimeSlot
+
+// String returns a printable representation of BwTimetable.
+func (x BwTimetable) String() string {
+	ret := []string{}
+	for _, ts := range x {
+		ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.HHMM, ts.Bandwidth.String()))
+	}
+	return strings.Join(ret, " ")
+}
+
+// Set the bandwidth timetable.
+func (x *BwTimetable) Set(s string) error {
+	// The timetable is formatted as:
+	// "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
+	// If only a single bandwidth identifier is provided, we assume constant bandwidth.
+
+	if len(s) == 0 {
+		return errors.New("empty string")
+	}
+	// Single value without time specification.
+	if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
+		ts := BwTimeSlot{}
+		if err := ts.Bandwidth.Set(s); err != nil {
+			return err
+		}
+		ts.HHMM = 0
+		*x = BwTimetable{ts}
+		return nil
+	}
+
+	for _, tok := range strings.Split(s, " ") {
+		tv := strings.Split(tok, ",")
+
+		// Format must be HH:MM,BW
+		if len(tv) != 2 {
+			return errors.Errorf("invalid time/bandwidth specification: %q", tok)
+		}
+
+		// Basic timespec sanity checking
+		HHMM := tv[0]
+		if len(HHMM) != 5 {
+			return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
+		}
+		hh, err := strconv.Atoi(HHMM[0:2])
+		if err != nil {
+			return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
+		}
+		if hh < 0 || hh > 23 {
+			return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
+		}
+		mm, err := strconv.Atoi(HHMM[3:])
+		if err != nil {
+			return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
+		}
+		if mm < 0 || mm > 59 {
+			return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
+		}
+
+		ts := BwTimeSlot{
+			HHMM: (hh * 100) + mm,
+		}
+		// Bandwidth limit for this time slot.
+		if err := ts.Bandwidth.Set(tv[1]); err != nil {
+			return err
+		}
+		*x = append(*x, ts)
+	}
+	return nil
+}
+
+// LimitAt returns a BwTimeSlot for the time requested.
+func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
+	// If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
+	if len(x) == 0 {
+		return BwTimeSlot{HHMM: 0, Bandwidth: -1}
+	}
+
+	HHMM := tt.Hour()*100 + tt.Minute()
+
+	// By default, we return the last element in the timetable. This
+	// satisfies two conditions: 1) If there's only one element it
+	// will always be selected, and 2) The last element of the table
+	// will "wrap around" until overriden by an earlier time slot.
+	// there's only one time slot in the timetable.
+	ret := x[len(x)-1]
+
+	mindif := 0
+	first := true
+
+	// Look for most recent time slot.
+	for _, ts := range x {
+		// Ignore time slots that are still in the future
+		if HHMM < ts.HHMM {
+			continue
+		}
+		dif := ((HHMM / 100 * 60) + (HHMM % 100)) - ((ts.HHMM / 100 * 60) + (ts.HHMM % 100))
+		if first {
+			mindif = dif
+			first = false
+		}
+		if dif <= mindif {
+			mindif = dif
+			ret = ts
+		}
+	}
+
+	return ret
+}
+
+// Type of the value
+func (x BwTimetable) Type() string {
+	return "BwTimetable"
+}
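
Everything needed to drive the new timetable is defined in the file above: `Set` parses space-separated `HH:MM,bandwidth` tokens (a bare bandwidth means a constant limit, `off` means unlimited) and `LimitAt` returns the slot in force at a given time. A quick usage sketch:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/ncw/rclone/fs"
    )

    func main() {
    	var tt fs.BwTimetable
    	// 10 MBytes/s from 08:00, unlimited from 23:00 onwards.
    	if err := tt.Set("08:00,10M 23:00,off"); err != nil {
    		log.Fatal(err)
    	}
    	slot := tt.LimitAt(time.Now())
    	fmt.Printf("limit now: %v\n", slot.Bandwidth)
    }
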
diff --git a/fs/bwtimetable_test.go b/fs/bwtimetable_test.go
new file mode 100644
index 000000000..6c1fd3bd5
--- /dev/null
+++ b/fs/bwtimetable_test.go
@@ -0,0 +1,113 @@
+package fs
+
+import (
+	"testing"
+	"time"
+
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Check it satisfies the interface
+var _ pflag.Value = (*BwTimetable)(nil)
+
+func TestBwTimetableSet(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want BwTimetable
+		err  bool
+	}{
+		{"", BwTimetable{}, true},
+		{"0", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 0}}, false},
+		{"666", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 666 * 1024}}, false},
+		{"10:20,666", BwTimetable{BwTimeSlot{HHMM: 1020, Bandwidth: 666 * 1024}}, false},
+		{
+			"11:00,333 13:40,666 23:50,10M 23:59,off",
+			BwTimetable{
+				BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+				BwTimeSlot{HHMM: 1340, Bandwidth: 666 * 1024},
+				BwTimeSlot{HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
+				BwTimeSlot{HHMM: 2359, Bandwidth: -1},
+			},
+			false,
+		},
+		{"bad,bad", BwTimetable{}, true},
+		{"bad bad", BwTimetable{}, true},
+		{"bad", BwTimetable{}, true},
+		{"1000X", BwTimetable{}, true},
+		{"2401,666", BwTimetable{}, true},
+		{"1061,666", BwTimetable{}, true},
+	} {
+		tt := BwTimetable{}
+		err := tt.Set(test.in)
+		if test.err {
+			require.Error(t, err)
+		} else {
+			require.NoError(t, err)
+		}
+		assert.Equal(t, test.want, tt)
+	}
+}
+
+func TestBwTimetableLimitAt(t *testing.T) {
+	for _, test := range []struct {
+		tt   BwTimetable
+		now  time.Time
+		want BwTimeSlot
+	}{
+		{
+			BwTimetable{},
+			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 0, Bandwidth: -1},
+		},
+		{
+			BwTimetable{BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024}},
+			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+		},
+		{
+			BwTimetable{
+				BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+				BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
+				BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
+				BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+			},
+			time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+		},
+		{
+			BwTimetable{
+				BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+				BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
+				BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
+				BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+			},
+			time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+		},
+		{
+			BwTimetable{
+				BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+				BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
+				BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
+				BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+			},
+			time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
+		},
+		{
+			BwTimetable{
+				BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
+				BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
+				BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
+				BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+			},
+			time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
+			BwTimeSlot{HHMM: 2350, Bandwidth: -1},
+		},
+	} {
+		slot := test.tt.LimitAt(test.now)
+		assert.Equal(t, test.want, slot)
+	}
+}
diff --git a/fs/config.go b/fs/config.go
index 54c25f520..330110e05 100644
--- a/fs/config.go
+++ b/fs/config.go
@@ -1,222 +1,29 @@
-// Read, write and edit the config file
-
 package fs
 
 import (
-	"bufio"
-	"bytes"
-	"crypto/aes"
-	"crypto/cipher"
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
 	"net"
-	"os"
-	"os/user"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
 	"time"
-	"unicode/utf8"
-
-	"github.com/Unknwon/goconfig"
-	"github.com/pkg/errors"
-	"github.com/spf13/pflag"
-	"golang.org/x/crypto/nacl/secretbox"
-	"golang.org/x/text/unicode/norm"
-)
-
-const (
-	configFileName       = "rclone.conf"
-	hiddenConfigFileName = "." + configFileName
-
-	// ConfigToken is the key used to store the token under
-	ConfigToken = "token"
-
-	// ConfigClientID is the config key used to store the client id
-	ConfigClientID = "client_id"
-
-	// ConfigClientSecret is the config key used to store the client secret
-	ConfigClientSecret = "client_secret"
-
-	// ConfigAuthURL is the config key used to store the auth server endpoint
-	ConfigAuthURL = "auth_url"
-
-	// ConfigTokenURL is the config key used to store the token server endpoint
-	ConfigTokenURL = "token_url"
-
-	// ConfigAutomatic indicates that we want non-interactive configuration
-	ConfigAutomatic = "config_automatic"
 )
 
 // Global
 var (
-	// configData is the config file data structure
-	configData *goconfig.ConfigFile
-
-	// ConfigPath points to the config file
-	ConfigPath = makeConfigPath()
-
-	// CacheDir points to the cache directory.  Users of this
-	// should make a subdirectory and use MkdirAll() to create it
-	// and any parents.
-	CacheDir = makeCacheDir()
-
 	// Config is the global config
-	Config = &ConfigInfo{}
+	Config = NewConfig()
 
-	// Flags
-	verbose               = CountP("verbose", "v", "Print lots more stuff (repeat for more)")
-	quiet                 = BoolP("quiet", "q", false, "Print as little stuff as possible")
-	modifyWindow          = DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
-	checkers              = IntP("checkers", "", 8, "Number of checkers to run in parallel.")
-	transfers             = IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
-	configFile            = StringP("config", "", ConfigPath, "Config file.")
-	cacheDir              = StringP("cache-dir", "", CacheDir, "Directory rclone will use for caching.")
-	checkSum              = BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
-	sizeOnly              = BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
-	ignoreTimes           = BoolP("ignore-times", "I", false, "Don't skip files that match size and time - transfer all files")
-	ignoreExisting        = BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
-	dryRun                = BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
-	connectTimeout        = DurationP("contimeout", "", 60*time.Second, "Connect timeout")
-	timeout               = DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
-	dumpHeaders           = BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
-	dumpBodies            = BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
-	skipVerify            = BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
-	AskPassword           = BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
-	deleteBefore          = BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transfering")
-	deleteDuring          = BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
-	deleteAfter           = BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transfering")
-	trackRenames          = BoolP("track-renames", "", false, "When synchronizing, track file renames and do a server side move if possible")
-	lowLevelRetries       = IntP("low-level-retries", "", 10, "Number of low level retries to do.")
-	updateOlder           = BoolP("update", "u", false, "Skip files that are newer on the destination.")
-	noGzip                = BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
-	maxDepth              = IntP("max-depth", "", -1, "If set limits the recursion depth to this.")
-	ignoreSize            = BoolP("ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
-	ignoreChecksum        = BoolP("ignore-checksum", "", false, "Skip post copy check of checksums.")
-	noTraverse            = BoolP("no-traverse", "", false, "Don't traverse destination file system on copy.")
-	noUpdateModTime       = BoolP("no-update-modtime", "", false, "Don't update destination mod-time if files identical.")
-	backupDir             = StringP("backup-dir", "", "", "Make backups into hierarchy based in DIR.")
-	suffix                = StringP("suffix", "", "", "Suffix for use with --backup-dir.")
-	useListR              = BoolP("fast-list", "", false, "Use recursive list if available. Uses more memory but fewer transactions.")
-	tpsLimit              = Float64P("tpslimit", "", 0, "Limit HTTP transactions per second to this.")
-	tpsLimitBurst         = IntP("tpslimit-burst", "", 1, "Max burst of transactions for --tpslimit.")
-	bindAddr              = StringP("bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
-	disableFeatures       = StringP("disable", "", "", "Disable a comma separated list of features.  Use help to see a list.")
-	userAgent             = StringP("user-agent", "", "rclone/"+Version, "Set the user-agent to a specified string. The default is rclone/ version")
-	immutable             = BoolP("immutable", "", false, "Do not modify files. Fail if existing files have been modified.")
-	autoConfirm           = BoolP("auto-confirm", "", false, "If enabled, do not request console confirmation.")
-	statsFileNameLength   = IntP("stats-file-name-length", "", 40, "Max file name length in stats. 0 for no limit")
-	streamingUploadCutoff = SizeSuffix(100 * 1024)
-	dump                  DumpFlags
-	logLevel              = LogLevelNotice
-	statsLogLevel         = LogLevelInfo
-	bwLimit               BwTimetable
-	bufferSize            SizeSuffix = 16 << 20
+	// ConfigFileGet reads a value from the config file
+	//
+	// This is a function pointer to decouple the config
+	// implementation from the fs
+	ConfigFileGet = func(section, key string, defaultVal ...string) string { return "" }
 
-	// Key to use for password en/decryption.
-	// When nil, no encryption will be used for saving.
-	configKey []byte
+	// CountError counts an error.  If any errors have been
+	// counted then rclone will exit with a non-zero error code.
+	//
+	// This is a function pointer to decouple the config
+	// implementation from the fs
+	CountError = func(err error) {}
 )
 
-func init() {
-	VarP(&logLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
-	VarP(&statsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
-	VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
-	VarP(&bufferSize, "buffer-size", "", "Buffer size when copying files.")
-	VarP(&streamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
-	VarP(&dump, "dump", "", "List of items to dump from: "+dumpFlagsList)
-}
-
-// crypt internals
-var (
-	cryptKey = []byte{
-		0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
-		0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
-		0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
-		0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
-	}
-	cryptBlock cipher.Block
-	cryptRand  = rand.Reader
-)
-
-// crypt transforms in to out using iv under AES-CTR.
-//
-// in and out may be the same buffer.
-//
-// Note encryption and decryption are the same operation
-func crypt(out, in, iv []byte) error {
-	if cryptBlock == nil {
-		var err error
-		cryptBlock, err = aes.NewCipher(cryptKey)
-		if err != nil {
-			return err
-		}
-	}
-	stream := cipher.NewCTR(cryptBlock, iv)
-	stream.XORKeyStream(out, in)
-	return nil
-}
-
-// Obscure a value
-//
-// This is done by encrypting with AES-CTR
-func Obscure(x string) (string, error) {
-	plaintext := []byte(x)
-	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
-	iv := ciphertext[:aes.BlockSize]
-	if _, err := io.ReadFull(cryptRand, iv); err != nil {
-		return "", errors.Wrap(err, "failed to read iv")
-	}
-	if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
-		return "", errors.Wrap(err, "encrypt failed")
-	}
-	return base64.RawURLEncoding.EncodeToString(ciphertext), nil
-}
-
-// MustObscure obscures a value, exiting with a fatal error if it failed
-func MustObscure(x string) string {
-	out, err := Obscure(x)
-	if err != nil {
-		log.Fatalf("Obscure failed: %v", err)
-	}
-	return out
-}
-
-// Reveal an obscured value
-func Reveal(x string) (string, error) {
-	ciphertext, err := base64.RawURLEncoding.DecodeString(x)
-	if err != nil {
-		return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
-	}
-	if len(ciphertext) < aes.BlockSize {
-		return "", errors.New("input too short when revealing password - is it obscured?")
-	}
-	buf := ciphertext[aes.BlockSize:]
-	iv := ciphertext[:aes.BlockSize]
-	if err := crypt(buf, buf, iv); err != nil {
-		return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
-	}
-	return string(buf), nil
-}
-
-// MustReveal reveals an obscured value, exiting with a fatal error if it failed
-func MustReveal(x string) string {
-	out, err := Reveal(x)
-	if err != nil {
-		log.Fatalf("Reveal failed: %v", err)
-	}
-	return out
-}
-
 // ConfigInfo is filesystem config options
 type ConfigInfo struct {
 	LogLevel              LogLevel
@@ -232,7 +39,6 @@ type ConfigInfo struct {
 	ConnectTimeout        time.Duration // Connect timeout
 	Timeout               time.Duration // Data channel timeout
 	Dump                  DumpFlags
-	Filter                *Filter
 	InsecureSkipVerify    bool // Skip server certificate verification
 	DeleteMode            DeleteMode
 	TrackRenames          bool // Track file renames.
@@ -249,1309 +55,43 @@ type ConfigInfo struct {
 	Suffix                string
 	UseListR              bool
 	BufferSize            SizeSuffix
+	BwLimit               BwTimetable
 	TPSLimit              float64
 	TPSLimitBurst         int
 	BindAddr              net.IP
 	DisableFeatures       []string
+	UserAgent             string
 	Immutable             bool
 	AutoConfirm           bool
 	StreamingUploadCutoff SizeSuffix
 	StatsFileNameLength   int
+	AskPassword           bool
 }
 
-// Return the path to the configuration file
-func makeConfigPath() string {
-	// Find user's home directory
-	usr, err := user.Current()
-	var homedir string
-	if err == nil {
-		homedir = usr.HomeDir
-	} else {
-		// Fall back to reading $HOME - work around user.Current() not
-		// working for cross compiled binaries on OSX.
-		// https://github.com/golang/go/issues/6376
-		homedir = os.Getenv("HOME")
-	}
+// NewConfig creates a new config with everything set to the default
+// value.  These are the ultimate defaults and are overridden by the
+// config module.
+func NewConfig() *ConfigInfo {
+	c := new(ConfigInfo)
 
-	// Possibly find the user's XDG config paths
-	// See XDG Base Directory specification
-	// https://specifications.freedesktop.org/basedir-spec/latest/
-	xdgdir := os.Getenv("XDG_CONFIG_HOME")
-	var xdgcfgdir string
-	if xdgdir != "" {
-		xdgcfgdir = filepath.Join(xdgdir, "rclone")
-	} else if homedir != "" {
-		xdgdir = filepath.Join(homedir, ".config")
-		xdgcfgdir = filepath.Join(xdgdir, "rclone")
-	}
+	// Set any values which aren't the zero for the type
+	c.LogLevel = LogLevelNotice
+	c.StatsLogLevel = LogLevelInfo
+	c.ModifyWindow = time.Nanosecond
+	c.Checkers = 8
+	c.Transfers = 4
+	c.ConnectTimeout = 60 * time.Second
+	c.Timeout = 5 * 60 * time.Second
+	c.DeleteMode = DeleteModeDefault
+	c.LowLevelRetries = 10
+	c.MaxDepth = -1
+	c.DataRateUnit = "bytes"
+	c.BufferSize = SizeSuffix(16 << 20)
+	c.UserAgent = "rclone/" + Version
+	c.StreamingUploadCutoff = SizeSuffix(100 * 1024)
+	c.StatsFileNameLength = 40
+	c.AskPassword = true
+	c.TPSLimitBurst = 1
 
-	// Use $XDG_CONFIG_HOME/rclone/rclone.conf if already existing
-	var xdgconf string
-	if xdgcfgdir != "" {
-		xdgconf = filepath.Join(xdgcfgdir, configFileName)
-		_, err := os.Stat(xdgconf)
-		if err == nil {
-			return xdgconf
-		}
-	}
-
-	// Use $HOME/.rclone.conf if already existing
-	var homeconf string
-	if homedir != "" {
-		homeconf = filepath.Join(homedir, hiddenConfigFileName)
-		_, err := os.Stat(homeconf)
-		if err == nil {
-			return homeconf
-		}
-	}
-
-	// Try to create $XDG_CONFIG_HOME/rclone/rclone.conf
-	if xdgconf != "" {
-		// xdgconf != "" implies xdgcfgdir != ""
-		err := os.MkdirAll(xdgcfgdir, os.ModePerm)
-		if err == nil {
-			return xdgconf
-		}
-	}
-
-	// Try to create $HOME/.rclone.conf
-	if homeconf != "" {
-		return homeconf
-	}
-
-	// Default to ./.rclone.conf (current working directory)
-	Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
-	Errorf(nil, "Defaulting to storing config in current directory.")
-	Errorf(nil, "Use -config flag to workaround.")
-	Errorf(nil, "Error was: %v", err)
-	return hiddenConfigFileName
+	return c
 }
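+
+// Illustrative use (an assumption, not part of this change): callers
+// start from the defaults and then override individual fields, e.g.
+//
+//	ci := NewConfig() // fs.NewConfig() from outside this package
+//	ci.Transfers = 8  // raise the default of 4 parallel transfers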
-
-// DeleteMode describes the possible delete modes in the config
-type DeleteMode byte
-
-// DeleteMode constants
-const (
-	DeleteModeOff DeleteMode = iota
-	DeleteModeBefore
-	DeleteModeDuring
-	DeleteModeAfter
-	DeleteModeOnly
-	DeleteModeDefault = DeleteModeAfter
-)
-
-// LoadConfig loads the config file
-func LoadConfig() {
-	// Read some flags if set
-	//
-	// FIXME read these from the config file too
-	Config.LogLevel = LogLevelNotice
-	if *verbose >= 2 {
-		Config.LogLevel = LogLevelDebug
-	} else if *verbose >= 1 {
-		Config.LogLevel = LogLevelInfo
-	}
-	if *quiet {
-		if *verbose > 0 {
-			log.Fatalf("Can't set -v and -q")
-		}
-		Config.LogLevel = LogLevelError
-	}
-	logLevelFlag := pflag.Lookup("log-level")
-	if logLevelFlag != nil && logLevelFlag.Changed {
-		if *verbose > 0 {
-			log.Fatalf("Can't set -v and --log-level")
-		}
-		if *quiet {
-			log.Fatalf("Can't set -q and --log-level")
-		}
-		Config.LogLevel = logLevel
-	}
-	Config.StatsLogLevel = statsLogLevel
-	Config.ModifyWindow = *modifyWindow
-	Config.Checkers = *checkers
-	Config.Transfers = *transfers
-	Config.DryRun = *dryRun
-	Config.Timeout = *timeout
-	Config.ConnectTimeout = *connectTimeout
-	Config.CheckSum = *checkSum
-	Config.SizeOnly = *sizeOnly
-	Config.IgnoreTimes = *ignoreTimes
-	Config.IgnoreExisting = *ignoreExisting
-	Config.InsecureSkipVerify = *skipVerify
-	Config.LowLevelRetries = *lowLevelRetries
-	Config.UpdateOlder = *updateOlder
-	Config.NoGzip = *noGzip
-	Config.MaxDepth = *maxDepth
-	Config.IgnoreSize = *ignoreSize
-	Config.IgnoreChecksum = *ignoreChecksum
-	Config.NoTraverse = *noTraverse
-	Config.NoUpdateModTime = *noUpdateModTime
-	Config.BackupDir = *backupDir
-	Config.Suffix = *suffix
-	Config.UseListR = *useListR
-	Config.TPSLimit = *tpsLimit
-	Config.TPSLimitBurst = *tpsLimitBurst
-	Config.Immutable = *immutable
-	Config.AutoConfirm = *autoConfirm
-	Config.StatsFileNameLength = *statsFileNameLength
-	Config.BufferSize = bufferSize
-	Config.StreamingUploadCutoff = streamingUploadCutoff
-	Config.Dump = dump
-	if *dumpHeaders {
-		Config.Dump |= DumpHeaders
-		Infof(nil, "--dump-headers is obsolete - please use --dump headers instead")
-	}
-	if *dumpBodies {
-		Config.Dump |= DumpBodies
-		Infof(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
-	}
-
-	Config.TrackRenames = *trackRenames
-
-	switch {
-	case *deleteBefore && (*deleteDuring || *deleteAfter),
-		*deleteDuring && *deleteAfter:
-		log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
-	case *deleteBefore:
-		Config.DeleteMode = DeleteModeBefore
-	case *deleteDuring:
-		Config.DeleteMode = DeleteModeDuring
-	case *deleteAfter:
-		Config.DeleteMode = DeleteModeAfter
-	default:
-		Config.DeleteMode = DeleteModeDefault
-	}
-
-	if Config.IgnoreSize && Config.SizeOnly {
-		log.Fatalf(`Can't use --size-only and --ignore-size together.`)
-	}
-
-	if Config.Suffix != "" && Config.BackupDir == "" {
-		log.Fatalf(`Can only use --suffix with --backup-dir.`)
-	}
-
-	if *bindAddr != "" {
-		addrs, err := net.LookupIP(*bindAddr)
-		if err != nil {
-			log.Fatalf("--bind: Failed to parse %q as IP address: %v", *bindAddr, err)
-		}
-		if len(addrs) != 1 {
-			log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", *bindAddr, len(addrs))
-		}
-		Config.BindAddr = addrs[0]
-	}
-
-	if *disableFeatures != "" {
-		if *disableFeatures == "help" {
-			log.Fatalf("Possible backend features are: %s\n", strings.Join(new(Features).List(), ", "))
-		}
-		Config.DisableFeatures = strings.Split(*disableFeatures, ",")
-	}
-
-	// Load configuration file.
-	var err error
-	ConfigPath, err = filepath.Abs(*configFile)
-	if err != nil {
-		ConfigPath = *configFile
-	}
-	configData, err = loadConfigFile()
-	if err == errorConfigFileNotFound {
-		Logf(nil, "Config file %q not found - using defaults", ConfigPath)
-		configData, _ = goconfig.LoadFromReader(&bytes.Buffer{})
-	} else if err != nil {
-		log.Fatalf("Failed to load config file %q: %v", ConfigPath, err)
-	} else {
-		Debugf(nil, "Using config file from %q", ConfigPath)
-	}
-
-	// Load cache directory from flags
-	CacheDir = *cacheDir
-
-	// Load filters
-	Config.Filter, err = NewFilter()
-	if err != nil {
-		log.Fatalf("Failed to load filters: %v", err)
-	}
-
-	// Start the token bucket limiter
-	startTokenBucket()
-
-	// Start the bandwidth update ticker
-	startTokenTicker()
-
-	// Start the transactions per second limiter
-	startHTTPTokenBucket()
-}
-
-var errorConfigFileNotFound = errors.New("config file not found")
-
-// loadConfigFile will load a config file, and
-// automatically decrypt it.
-func loadConfigFile() (*goconfig.ConfigFile, error) {
-	b, err := ioutil.ReadFile(ConfigPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, errorConfigFileNotFound
-		}
-		return nil, err
-	}
-
-	// Find first non-empty line
-	r := bufio.NewReader(bytes.NewBuffer(b))
-	for {
-		line, _, err := r.ReadLine()
-		if err != nil {
-			if err == io.EOF {
-				return goconfig.LoadFromReader(bytes.NewBuffer(b))
-			}
-			return nil, err
-		}
-		l := strings.TrimSpace(string(line))
-		if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
-			continue
-		}
-		// First non-empty or non-comment must be ENCRYPT_V0
-		if l == "RCLONE_ENCRYPT_V0:" {
-			break
-		}
-		if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
-			return nil, errors.New("unsupported configuration encryption - update rclone for support")
-		}
-		return goconfig.LoadFromReader(bytes.NewBuffer(b))
-	}
-
-	// Encrypted content is base64 encoded.
-	dec := base64.NewDecoder(base64.StdEncoding, r)
-	box, err := ioutil.ReadAll(dec)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to load base64 encoded data")
-	}
-	if len(box) < 24+secretbox.Overhead {
-		return nil, errors.New("Configuration data too short")
-	}
-	envpw := os.Getenv("RCLONE_CONFIG_PASS")
-
-	var out []byte
-	for {
-		if len(configKey) == 0 && envpw != "" {
-			err := setConfigPassword(envpw)
-			if err != nil {
-				fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
-			} else {
-				Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
-			}
-		}
-		if len(configKey) == 0 {
-			if !*AskPassword {
-				return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
-			}
-			getConfigPassword("Enter configuration password:")
-		}
-
-		// Nonce is first 24 bytes of the ciphertext
-		var nonce [24]byte
-		copy(nonce[:], box[:24])
-		var key [32]byte
-		copy(key[:], configKey[:32])
-
-		// Attempt to decrypt
-		var ok bool
-		out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
-		if ok {
-			break
-		}
-
-		// Retry
-		Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
-		configKey = nil
-		envpw = ""
-	}
-	return goconfig.LoadFromReader(bytes.NewBuffer(out))
-}
-
-// checkPassword normalises and validates the password
-func checkPassword(password string) (string, error) {
-	if !utf8.ValidString(password) {
-		return "", errors.New("password contains invalid utf8 characters")
-	}
-	// Check for leading/trailing whitespace
-	trimmedPassword := strings.TrimSpace(password)
-	// Warn user if password has leading+trailing whitespace
-	if len(password) != len(trimmedPassword) {
-		fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
-	}
-	// Normalize to reduce weird variations.
-	password = norm.NFKC.String(password)
-	if len(password) == 0 || len(trimmedPassword) == 0 {
-		return "", errors.New("no characters in password")
-	}
-	return password, nil
-}
-
-// GetPassword asks the user for a password with the prompt given.
-func GetPassword(prompt string) string {
-	fmt.Fprintln(os.Stderr, prompt)
-	for {
-		fmt.Fprint(os.Stderr, "password:")
-		password := ReadPassword()
-		password, err := checkPassword(password)
-		if err == nil {
-			return password
-		}
-		fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
-	}
-}
-
-// ChangePassword will query the user twice for the named password. If
-// the same password is entered it is returned.
-func ChangePassword(name string) string {
-	for {
-		a := GetPassword(fmt.Sprintf("Enter %s password:", name))
-		b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
-		if a == b {
-			return a
-		}
-		fmt.Println("Passwords do not match!")
-	}
-}
-
-// getConfigPassword will query the user for a password the
-// first time it is required.
-func getConfigPassword(q string) {
-	if len(configKey) != 0 {
-		return
-	}
-	for {
-		password := GetPassword(q)
-		err := setConfigPassword(password)
-		if err == nil {
-			return
-		}
-		fmt.Fprintln(os.Stderr, "Error:", err)
-	}
-}
-
-// setConfigPassword will set the configKey to the hash of
-// the password. If the length of the password is
-// zero after trimming+normalization, an error is returned.
-func setConfigPassword(password string) error {
-	password, err := checkPassword(password)
-	if err != nil {
-		return err
-	}
-	// Create SHA256 has of the password
-	sha := sha256.New()
-	_, err = sha.Write([]byte("[" + password + "][rclone-config]"))
-	if err != nil {
-		return err
-	}
-	configKey = sha.Sum(nil)
-	return nil
-}
-
-// changeConfigPassword will query the user twice
-// for a password. If the same password is entered
-// twice the key is updated.
-func changeConfigPassword() {
-	err := setConfigPassword(ChangePassword("NEW configuration"))
-	if err != nil {
-		fmt.Printf("Failed to set config password: %v\n", err)
-		return
-	}
-}
-
-// SaveConfig saves configuration file.
-// if configKey has been set, the file will be encrypted.
-func SaveConfig() {
-	dir, name := filepath.Split(ConfigPath)
-	f, err := ioutil.TempFile(dir, name)
-	if err != nil {
-		log.Fatalf("Failed to create temp file for new config: %v", err)
-		return
-	}
-	defer func() {
-		if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
-			Errorf(nil, "Failed to remove temp config file: %v", err)
-		}
-	}()
-
-	var buf bytes.Buffer
-	err = goconfig.SaveConfigData(configData, &buf)
-	if err != nil {
-		log.Fatalf("Failed to save config file: %v", err)
-	}
-
-	if len(configKey) == 0 {
-		if _, err := buf.WriteTo(f); err != nil {
-			log.Fatalf("Failed to write temp config file: %v", err)
-		}
-	} else {
-		fmt.Fprintln(f, "# Encrypted rclone configuration File")
-		fmt.Fprintln(f, "")
-		fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
-
-		// Generate new nonce and write it to the start of the ciphertext
-		var nonce [24]byte
-		n, _ := rand.Read(nonce[:])
-		if n != 24 {
-			log.Fatalf("nonce short read: %d", n)
-		}
-		enc := base64.NewEncoder(base64.StdEncoding, f)
-		_, err = enc.Write(nonce[:])
-		if err != nil {
-			log.Fatalf("Failed to write temp config file: %v", err)
-		}
-
-		var key [32]byte
-		copy(key[:], configKey[:32])
-
-		b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
-		_, err = enc.Write(b)
-		if err != nil {
-			log.Fatalf("Failed to write temp config file: %v", err)
-		}
-		_ = enc.Close()
-	}
-
-	err = f.Close()
-	if err != nil {
-		log.Fatalf("Failed to close config file: %v", err)
-	}
-
-	var fileMode os.FileMode = 0600
-	info, err := os.Stat(ConfigPath)
-	if err != nil {
-		Debugf(nil, "Using default permissions for config file: %v", fileMode)
-	} else if info.Mode() != fileMode {
-		Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode())
-		fileMode = info.Mode()
-	}
-
-	attemptCopyGroup(ConfigPath, f.Name())
-
-	err = os.Chmod(f.Name(), fileMode)
-	if err != nil {
-		Errorf(nil, "Failed to set permissions on config file: %v", err)
-	}
-
-	if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) {
-		log.Fatalf("Failed to move previous config to backup location: %v", err)
-	}
-	if err = os.Rename(f.Name(), ConfigPath); err != nil {
-		log.Fatalf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
-	}
-	if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) {
-		Errorf(nil, "Failed to remove backup config file: %v", err)
-	}
-}
-
-// ConfigSetValueAndSave sets the key to the value and saves just that
-// value in the config file.  It loads the old config file in from
-// disk first and overwrites the given value only.
-func ConfigSetValueAndSave(name, key, value string) (err error) {
-	// Set the value in config in case we fail to reload it
-	configData.SetValue(name, key, value)
-	// Reload the config file
-	reloadedConfigFile, err := loadConfigFile()
-	if err == errorConfigFileNotFound {
-		// Config file not written yet so ignore reload
-		return nil
-	} else if err != nil {
-		return err
-	}
-	_, err = reloadedConfigFile.GetSection(name)
-	if err != nil {
-		// Section doesn't exist yet so ignore reload
-		return err
-	}
-	// Update the config file with the reloaded version
-	configData = reloadedConfigFile
-	// Set the value in the reloaded version
-	reloadedConfigFile.SetValue(name, key, value)
-	// Save it again
-	SaveConfig()
-	return nil
-}
-
-// ShowRemotes shows an overview of the config file
-func ShowRemotes() {
-	remotes := configData.GetSectionList()
-	if len(remotes) == 0 {
-		return
-	}
-	sort.Strings(remotes)
-	fmt.Printf("%-20s %s\n", "Name", "Type")
-	fmt.Printf("%-20s %s\n", "====", "====")
-	for _, remote := range remotes {
-		fmt.Printf("%-20s %s\n", remote, ConfigFileGet(remote, "type"))
-	}
-}
-
-// ChooseRemote chooses a remote name
-func ChooseRemote() string {
-	remotes := configData.GetSectionList()
-	sort.Strings(remotes)
-	return Choose("remote", remotes, nil, false)
-}
-
-// ReadLine reads some input
-var ReadLine = func() string {
-	buf := bufio.NewReader(os.Stdin)
-	line, err := buf.ReadString('\n')
-	if err != nil {
-		log.Fatalf("Failed to read line: %v", err)
-	}
-	return strings.TrimSpace(line)
-}
-
-// Command - choose one
-func Command(commands []string) byte {
-	opts := []string{}
-	for _, text := range commands {
-		fmt.Printf("%c) %s\n", text[0], text[1:])
-		opts = append(opts, text[:1])
-	}
-	optString := strings.Join(opts, "")
-	optHelp := strings.Join(opts, "/")
-	for {
-		fmt.Printf("%s> ", optHelp)
-		result := strings.ToLower(ReadLine())
-		if len(result) != 1 {
-			continue
-		}
-		i := strings.Index(optString, string(result[0]))
-		if i >= 0 {
-			return result[0]
-		}
-	}
-}
-
-// Confirm asks the user for Yes or No and returns true or false
-func Confirm() bool {
-	if Config.AutoConfirm {
-		return true
-	}
-	return Command([]string{"yYes", "nNo"}) == 'y'
-}
-
-// Choose one of the defaults or type a new string if newOk is set
-func Choose(what string, defaults, help []string, newOk bool) string {
-	valueDescripton := "an existing"
-	if newOk {
-		valueDescripton = "your own"
-	}
-	fmt.Printf("Choose a number from below, or type in %s value\n", valueDescripton)
-	for i, text := range defaults {
-		var lines []string
-		if help != nil {
-			parts := strings.Split(help[i], "\n")
-			lines = append(lines, parts...)
-		}
-		lines = append(lines, fmt.Sprintf("%q", text))
-		pos := i + 1
-		if len(lines) == 1 {
-			fmt.Printf("%2d > %s\n", pos, text)
-		} else {
-			mid := (len(lines) - 1) / 2
-			for i, line := range lines {
-				var sep rune
-				switch i {
-				case 0:
-					sep = '/'
-				case len(lines) - 1:
-					sep = '\\'
-				default:
-					sep = '|'
-				}
-				number := "  "
-				if i == mid {
-					number = fmt.Sprintf("%2d", pos)
-				}
-				fmt.Printf("%s %c %s\n", number, sep, line)
-			}
-		}
-	}
-	for {
-		fmt.Printf("%s> ", what)
-		result := ReadLine()
-		i, err := strconv.Atoi(result)
-		if err != nil {
-			if newOk {
-				return result
-			}
-			for _, v := range defaults {
-				if result == v {
-					return result
-				}
-			}
-			continue
-		}
-		if i >= 1 && i <= len(defaults) {
-			return defaults[i-1]
-		}
-	}
-}
-
-// ChooseNumber asks the user to enter a number between min and max
-// inclusive prompting them with what.
-func ChooseNumber(what string, min, max int) int {
-	for {
-		fmt.Printf("%s> ", what)
-		result := ReadLine()
-		i, err := strconv.Atoi(result)
-		if err != nil {
-			fmt.Printf("Bad number: %v\n", err)
-			continue
-		}
-		if i < min || i > max {
-			fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
-			continue
-		}
-		return i
-	}
-}
-
-// ShowRemote shows the contents of the remote
-func ShowRemote(name string) {
-	fmt.Printf("--------------------\n")
-	fmt.Printf("[%s]\n", name)
-	fs := MustFindByName(name)
-	for _, key := range configData.GetKeyList(name) {
-		isPassword := false
-		for _, option := range fs.Options {
-			if option.Name == key && option.IsPassword {
-				isPassword = true
-				break
-			}
-		}
-		value := ConfigFileGet(name, key)
-		if isPassword && value != "" {
-			fmt.Printf("%s = *** ENCRYPTED ***\n", key)
-		} else {
-			fmt.Printf("%s = %s\n", key, value)
-		}
-	}
-	fmt.Printf("--------------------\n")
-}
-
-// OkRemote prints the contents of the remote and ask if it is OK
-func OkRemote(name string) bool {
-	ShowRemote(name)
-	switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
-	case 'y':
-		return true
-	case 'e':
-		return false
-	case 'd':
-		configData.DeleteSection(name)
-		return true
-	default:
-		Errorf(nil, "Bad choice %c", i)
-	}
-	return false
-}
-
-// MustFindByName finds the RegInfo for the remote name passed in or
-// exits with a fatal error.
-func MustFindByName(name string) *RegInfo {
-	fsType := ConfigFileGet(name, "type")
-	if fsType == "" {
-		log.Fatalf("Couldn't find type of fs for %q", name)
-	}
-	return MustFind(fsType)
-}
-
-// RemoteConfig runs the config helper for the remote if needed
-func RemoteConfig(name string) {
-	fmt.Printf("Remote config\n")
-	f := MustFindByName(name)
-	if f.Config != nil {
-		f.Config(name)
-	}
-}
-
-// ChooseOption asks the user to choose an option
-func ChooseOption(o *Option) string {
-	fmt.Println(o.Help)
-	if o.IsPassword {
-		actions := []string{"yYes type in my own password", "gGenerate random password"}
-		if o.Optional {
-			actions = append(actions, "nNo leave this optional password blank")
-		}
-		var password string
-		switch i := Command(actions); i {
-		case 'y':
-			password = ChangePassword("the")
-		case 'g':
-			for {
-				fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
-				bits := ChooseNumber("Bits", 64, 1024)
-				bytes := bits / 8
-				if bits%8 != 0 {
-					bytes++
-				}
-				var pw = make([]byte, bytes)
-				n, _ := rand.Read(pw)
-				if n != bytes {
-					log.Fatalf("password short read: %d", n)
-				}
-				password = base64.RawURLEncoding.EncodeToString(pw)
-				fmt.Printf("Your password is: %s\n", password)
-				fmt.Printf("Use this password?\n")
-				if Confirm() {
-					break
-				}
-			}
-		case 'n':
-			return ""
-		default:
-			Errorf(nil, "Bad choice %c", i)
-		}
-		return MustObscure(password)
-	}
-	if len(o.Examples) > 0 {
-		var values []string
-		var help []string
-		for _, example := range o.Examples {
-			values = append(values, example.Value)
-			help = append(help, example.Help)
-		}
-		return Choose(o.Name, values, help, true)
-	}
-	fmt.Printf("%s> ", o.Name)
-	return ReadLine()
-}
-
-// UpdateRemote adds the keyValues passed in to the remote of name.
-// keyValues should be key, value pairs.
-func UpdateRemote(name string, keyValues []string) error {
-	if len(keyValues)%2 != 0 {
-		return errors.New("found key without value")
-	}
-	// Set the config
-	for i := 0; i < len(keyValues); i += 2 {
-		configData.SetValue(name, keyValues[i], keyValues[i+1])
-	}
-	RemoteConfig(name)
-	ShowRemote(name)
-	SaveConfig()
-	return nil
-}
-
-// CreateRemote creates a new remote with name, provider and a list of
-// parameters which are key, value pairs.  If update is set then it
-// adds the new keys rather than replacing all of them.
-func CreateRemote(name string, provider string, keyValues []string) error {
-	// Suppress Confirm
-	Config.AutoConfirm = true
-	// Delete the old config if it exists
-	configData.DeleteSection(name)
-	// Set the type
-	configData.SetValue(name, "type", provider)
-	// Show this is automatically configured
-	configData.SetValue(name, ConfigAutomatic, "yes")
-	// Set the remaining values
-	return UpdateRemote(name, keyValues)
-}
-
-// PasswordRemote adds the keyValues passed in to the remote of name.
-// keyValues should be key, value pairs.
-func PasswordRemote(name string, keyValues []string) error {
-	if len(keyValues) != 2 {
-		return errors.New("found key without value")
-	}
-	// Suppress Confirm
-	Config.AutoConfirm = true
-	passwd := MustObscure(keyValues[1])
-	if passwd != "" {
-		configData.SetValue(name, keyValues[0], passwd)
-		RemoteConfig(name)
-		ShowRemote(name)
-		SaveConfig()
-	}
-	return nil
-}
-
-// JSONListProviders prints all the providers and options in JSON format
-func JSONListProviders() error {
-	b, err := json.MarshalIndent(fsRegistry, "", "    ")
-	if err != nil {
-		return errors.Wrap(err, "failed to marshal examples")
-	}
-	_, err = os.Stdout.Write(b)
-	if err != nil {
-		return errors.Wrap(err, "failed to write providers list")
-	}
-	return nil
-}
-
-// fsOption returns an Option describing the possible remotes
-func fsOption() *Option {
-	o := &Option{
-		Name: "Storage",
-		Help: "Type of storage to configure.",
-	}
-	for _, item := range fsRegistry {
-		example := OptionExample{
-			Value: item.Name,
-			Help:  item.Description,
-		}
-		o.Examples = append(o.Examples, example)
-	}
-	o.Examples.Sort()
-	return o
-}
-
-// NewRemoteName asks the user for a name for a remote
-func NewRemoteName() (name string) {
-	for {
-		fmt.Printf("name> ")
-		name = ReadLine()
-		parts := matcher.FindStringSubmatch(name + ":")
-		switch {
-		case name == "":
-			fmt.Printf("Can't use empty name.\n")
-		case isDriveLetter(name):
-			fmt.Printf("Can't use %q as it can be confused a drive letter.\n", name)
-		case parts == nil:
-			fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
-		default:
-			return name
-		}
-	}
-}
-
-// NewRemote make a new remote from its name
-func NewRemote(name string) {
-	newType := ChooseOption(fsOption())
-	configData.SetValue(name, "type", newType)
-	fs := MustFind(newType)
-	for _, option := range fs.Options {
-		configData.SetValue(name, option.Name, ChooseOption(&option))
-	}
-	RemoteConfig(name)
-	if OkRemote(name) {
-		SaveConfig()
-		return
-	}
-	EditRemote(fs, name)
-}
-
-// EditRemote gets the user to edit a remote
-func EditRemote(fs *RegInfo, name string) {
-	ShowRemote(name)
-	fmt.Printf("Edit remote\n")
-	for {
-		for _, option := range fs.Options {
-			key := option.Name
-			value := ConfigFileGet(name, key)
-			fmt.Printf("Value %q = %q\n", key, value)
-			fmt.Printf("Edit? (y/n)>\n")
-			if Confirm() {
-				newValue := ChooseOption(&option)
-				configData.SetValue(name, key, newValue)
-			}
-		}
-		if OkRemote(name) {
-			break
-		}
-	}
-	SaveConfig()
-	RemoteConfig(name)
-}
-
-// DeleteRemote gets the user to delete a remote
-func DeleteRemote(name string) {
-	configData.DeleteSection(name)
-	SaveConfig()
-}
-
-// copyRemote asks the user for a new remote name and copies name into
-// it. Returns the new name.
-func copyRemote(name string) string {
-	newName := NewRemoteName()
-	// Copy the keys
-	for _, key := range configData.GetKeyList(name) {
-		value := configData.MustValue(name, key, "")
-		configData.SetValue(newName, key, value)
-	}
-	return newName
-}
-
-// RenameRemote renames a config section
-func RenameRemote(name string) {
-	fmt.Printf("Enter new name for %q remote.\n", name)
-	newName := copyRemote(name)
-	if name != newName {
-		configData.DeleteSection(name)
-		SaveConfig()
-	}
-}
-
-// CopyRemote copies a config section
-func CopyRemote(name string) {
-	fmt.Printf("Enter name for copy of %q remote.\n", name)
-	copyRemote(name)
-	SaveConfig()
-}
-
-// ShowConfigLocation prints the location of the config file in use
-func ShowConfigLocation() {
-	if _, err := os.Stat(ConfigPath); os.IsNotExist(err) {
-		fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
-	} else {
-		fmt.Println("Configuration file is stored at:")
-	}
-	fmt.Printf("%s\n", ConfigPath)
-}
-
-// ShowConfig prints the (unencrypted) config options
-func ShowConfig() {
-	var buf bytes.Buffer
-	if err := goconfig.SaveConfigData(configData, &buf); err != nil {
-		log.Fatalf("Failed to serialize config: %v", err)
-	}
-	str := buf.String()
-	if str == "" {
-		str = "; empty config\n"
-	}
-	fmt.Printf("%s", str)
-}
-
-// EditConfig edits the config file interactively
-func EditConfig() {
-	for {
-		haveRemotes := len(configData.GetSectionList()) != 0
-		what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
-		if haveRemotes {
-			fmt.Printf("Current remotes:\n\n")
-			ShowRemotes()
-			fmt.Printf("\n")
-		} else {
-			fmt.Printf("No remotes found - make a new one\n")
-			// take 2nd item and last 2 items of menu list
-			what = append(what[1:2], what[len(what)-2:]...)
-		}
-		switch i := Command(what); i {
-		case 'e':
-			name := ChooseRemote()
-			fs := MustFindByName(name)
-			EditRemote(fs, name)
-		case 'n':
-			NewRemote(NewRemoteName())
-		case 'd':
-			name := ChooseRemote()
-			DeleteRemote(name)
-		case 'r':
-			RenameRemote(ChooseRemote())
-		case 'c':
-			CopyRemote(ChooseRemote())
-		case 's':
-			SetPassword()
-		case 'q':
-			return
-
-		}
-	}
-}
-
-// SetPassword will allow the user to modify the current
-// configuration encryption settings.
-func SetPassword() {
-	for {
-		if len(configKey) > 0 {
-			fmt.Println("Your configuration is encrypted.")
-			what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
-			switch i := Command(what); i {
-			case 'c':
-				changeConfigPassword()
-				SaveConfig()
-				fmt.Println("Password changed")
-				continue
-			case 'u':
-				configKey = nil
-				SaveConfig()
-				continue
-			case 'q':
-				return
-			}
-
-		} else {
-			fmt.Println("Your configuration is not encrypted.")
-			fmt.Println("If you add a password, you will protect your login information to cloud services.")
-			what := []string{"aAdd Password", "qQuit to main menu"}
-			switch i := Command(what); i {
-			case 'a':
-				changeConfigPassword()
-				SaveConfig()
-				fmt.Println("Password set")
-				continue
-			case 'q':
-				return
-			}
-		}
-	}
-}
-
-// Authorize is for remote authorization of headless machines.
-//
-// It expects 1 or 3 arguments
-//
-//   rclone authorize "fs name"
-//   rclone authorize "fs name" "client id" "client secret"
-func Authorize(args []string) {
-	switch len(args) {
-	case 1, 3:
-	default:
-		log.Fatalf("Invalid number of arguments: %d", len(args))
-	}
-	newType := args[0]
-	fs := MustFind(newType)
-	if fs.Config == nil {
-		log.Fatalf("Can't authorize fs %q", newType)
-	}
-	// Name used for temporary fs
-	name := "**temp-fs**"
-
-	// Make sure we delete it
-	defer DeleteRemote(name)
-
-	// Indicate that we want fully automatic configuration.
-	configData.SetValue(name, ConfigAutomatic, "yes")
-	if len(args) == 3 {
-		configData.SetValue(name, ConfigClientID, args[1])
-		configData.SetValue(name, ConfigClientSecret, args[2])
-	}
-	fs.Config(name)
-}
-
-// configToEnv converts an config section and name, eg ("myremote",
-// "ignore-size") into an environment name
-// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
-func configToEnv(section, name string) string {
-	return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
-}
-
-// ConfigFileGet gets the config key under section returning the
-// default or empty string if not set.
-//
-// It looks up defaults in the environment if they are present
-func ConfigFileGet(section, key string, defaultVal ...string) string {
-	envKey := configToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		defaultVal = []string{newValue}
-	}
-	return configData.MustValue(section, key, defaultVal...)
-}
-
-// ConfigFileGetBool gets the config key under section returning the
-// default or false if not set.
-//
-// It looks up defaults in the environment if they are present
-func ConfigFileGetBool(section, key string, defaultVal ...bool) bool {
-	envKey := configToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newBool, err := strconv.ParseBool(newValue)
-		if err != nil {
-			Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []bool{newBool}
-		}
-	}
-	return configData.MustBool(section, key, defaultVal...)
-}
-
-// ConfigFileGetInt gets the config key under section returning the
-// default or 0 if not set.
-//
-// It looks up defaults in the environment if they are present
-func ConfigFileGetInt(section, key string, defaultVal ...int) int {
-	envKey := configToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newInt, err := strconv.Atoi(newValue)
-		if err != nil {
-			Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []int{newInt}
-		}
-	}
-	return configData.MustInt(section, key, defaultVal...)
-}
-
-// ConfigFileSet sets the key in section to value.  It doesn't save
-// the config file.
-func ConfigFileSet(section, key, value string) {
-	configData.SetValue(section, key, value)
-}
-
-// ConfigFileDeleteKey deletes the config key in the config file.
-// It returns true if the key was deleted,
-// or returns false if the section or key didn't exist.
-func ConfigFileDeleteKey(section, key string) bool {
-	return configData.DeleteKey(section, key)
-}
-
-var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
-
-// ConfigFileSections returns the sections in the config file
-// including any defined by environment variables.
-func ConfigFileSections() []string {
-	sections := configData.GetSectionList()
-	for _, item := range os.Environ() {
-		matches := matchEnv.FindStringSubmatch(item)
-		if len(matches) == 2 {
-			sections = append(sections, strings.ToLower(matches[1]))
-		}
-	}
-	return sections
-}
-
-// ConfigDump dumps all the config as a JSON file
-func ConfigDump() error {
-	dump := make(map[string]map[string]string)
-	for _, name := range configData.GetSectionList() {
-		params := make(map[string]string)
-		for _, key := range configData.GetKeyList(name) {
-			params[key] = ConfigFileGet(name, key)
-		}
-		dump[name] = params
-	}
-	b, err := json.MarshalIndent(dump, "", "    ")
-	if err != nil {
-		return errors.Wrap(err, "failed to marshal config dump")
-	}
-	_, err = os.Stdout.Write(b)
-	if err != nil {
-		return errors.Wrap(err, "failed to write config dump")
-	}
-	return nil
-}
-
-// makeCacheDir returns a directory to use for caching.
-//
-// Code borrowed from go stdlib until it is made public
-func makeCacheDir() (dir string) {
-	// Compute default location.
-	switch runtime.GOOS {
-	case "windows":
-		dir = os.Getenv("LocalAppData")
-
-	case "darwin":
-		dir = os.Getenv("HOME")
-		if dir != "" {
-			dir += "/Library/Caches"
-		}
-
-	case "plan9":
-		dir = os.Getenv("home")
-		if dir != "" {
-			// Plan 9 has no established per-user cache directory,
-			// but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
-			dir += "/lib/cache"
-		}
-
-	default: // Unix
-		// https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-		dir = os.Getenv("XDG_CACHE_HOME")
-		if dir == "" {
-			dir = os.Getenv("HOME")
-			if dir != "" {
-				dir += "/.cache"
-			}
-		}
-	}
-
-	// if no dir found then use TempDir - we will have a cachedir!
-	if dir == "" {
-		dir = os.TempDir()
-	}
-	return filepath.Join(dir, "rclone")
-}
-
-// DumpFlags describes the Dump options in force
-type DumpFlags int
-
-// DumpFlags definitions
-const (
-	DumpHeaders DumpFlags = 1 << iota
-	DumpBodies
-	DumpRequests
-	DumpResponses
-	DumpAuth
-	DumpFilters
-)
-
-var dumpFlags = []struct {
-	flag DumpFlags
-	name string
-}{
-	{DumpHeaders, "headers"},
-	{DumpBodies, "bodies"},
-	{DumpRequests, "requests"},
-	{DumpResponses, "responses"},
-	{DumpAuth, "auth"},
-	{DumpFilters, "filters"},
-}
-
-// list of dump flags used in the help
-var dumpFlagsList string
-
-func init() {
-	// calculate the dump flags list
-	var out []string
-	for _, info := range dumpFlags {
-		out = append(out, info.name)
-	}
-	dumpFlagsList = strings.Join(out, ",")
-}
-
-// String turns a DumpFlags into a string
-func (f DumpFlags) String() string {
-	var out []string
-	for _, info := range dumpFlags {
-		if f&info.flag != 0 {
-			out = append(out, info.name)
-			f &^= info.flag
-		}
-	}
-	if f != 0 {
-		out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
-	}
-	return strings.Join(out, ",")
-}
-
-// Set a DumpFlags as a comma separated list of flags
-func (f *DumpFlags) Set(s string) error {
-	var flags DumpFlags
-	parts := strings.Split(s, ",")
-	for _, part := range parts {
-		found := false
-		part = strings.ToLower(strings.TrimSpace(part))
-		if part == "" {
-			continue
-		}
-		for _, info := range dumpFlags {
-			if part == info.name {
-				found = true
-				flags |= info.flag
-			}
-		}
-		if !found {
-			return errors.Errorf("Unknown dump flag %q", part)
-		}
-	}
-	*f = flags
-	return nil
-}
-
-// Type of the value
-func (f *DumpFlags) Type() string {
-	return "string"
-}
-
-// Check it satisfies the interface
-var _ pflag.Value = (*DumpFlags)(nil)
diff --git a/fs/config/config.go b/fs/config/config.go
new file mode 100644
index 000000000..40a3951b4
--- /dev/null
+++ b/fs/config/config.go
@@ -0,0 +1,1159 @@
+// Package config reads, writes and edits the config file and deals with command line flags
+package config
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/user"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/Unknwon/goconfig"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/driveletter"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/nacl/secretbox"
+	"golang.org/x/text/unicode/norm"
+)
+
+const (
+	configFileName       = "rclone.conf"
+	hiddenConfigFileName = "." + configFileName
+
+	// ConfigToken is the key used to store the token under
+	ConfigToken = "token"
+
+	// ConfigClientID is the config key used to store the client id
+	ConfigClientID = "client_id"
+
+	// ConfigClientSecret is the config key used to store the client secret
+	ConfigClientSecret = "client_secret"
+
+	// ConfigAuthURL is the config key used to store the auth server endpoint
+	ConfigAuthURL = "auth_url"
+
+	// ConfigTokenURL is the config key used to store the token server endpoint
+	ConfigTokenURL = "token_url"
+
+	// ConfigAutomatic indicates that we want non-interactive configuration
+	ConfigAutomatic = "config_automatic"
+)
+
+// Global
+var (
+	// configData is the config file data structure
+	configData *goconfig.ConfigFile
+
+	// ConfigPath points to the config file
+	ConfigPath = makeConfigPath()
+
+	// CacheDir points to the cache directory.  Users of this
+	// should make a subdirectory and use MkdirAll() to create it
+	// and any parents.
+	CacheDir = makeCacheDir()
+
+	// Key to use for password en/decryption.
+	// When nil, no encryption will be used for saving.
+	configKey []byte
+)
+
+func init() {
+	// Set the function pointer up in fs
+	fs.ConfigFileGet = FileGet
+}
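+
+// The assignment above is a function-pointer hook: fs cannot import
+// fs/config without creating an import cycle, so fs declares
+// ConfigFileGet as a variable and this package plugs FileGet in.
+// Code in package fs can then call, for example:
+//
+//	value := fs.ConfigFileGet("myremote", "type") // "myremote" is hypothetical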
+
+// Return the path to the configuration file
+func makeConfigPath() string {
+	// Find user's home directory
+	usr, err := user.Current()
+	var homedir string
+	if err == nil {
+		homedir = usr.HomeDir
+	} else {
+		// Fall back to reading $HOME - work around user.Current() not
+		// working for cross compiled binaries on OSX.
+		// https://github.com/golang/go/issues/6376
+		homedir = os.Getenv("HOME")
+	}
+
+	// Possibly find the user's XDG config paths
+	// See XDG Base Directory specification
+	// https://specifications.freedesktop.org/basedir-spec/latest/
+	xdgdir := os.Getenv("XDG_CONFIG_HOME")
+	var xdgcfgdir string
+	if xdgdir != "" {
+		xdgcfgdir = filepath.Join(xdgdir, "rclone")
+	} else if homedir != "" {
+		xdgdir = filepath.Join(homedir, ".config")
+		xdgcfgdir = filepath.Join(xdgdir, "rclone")
+	}
+
+	// Use $XDG_CONFIG_HOME/rclone/rclone.conf if already existing
+	var xdgconf string
+	if xdgcfgdir != "" {
+		xdgconf = filepath.Join(xdgcfgdir, configFileName)
+		_, err := os.Stat(xdgconf)
+		if err == nil {
+			return xdgconf
+		}
+	}
+
+	// Use $HOME/.rclone.conf if already existing
+	var homeconf string
+	if homedir != "" {
+		homeconf = filepath.Join(homedir, hiddenConfigFileName)
+		_, err := os.Stat(homeconf)
+		if err == nil {
+			return homeconf
+		}
+	}
+
+	// Try to create $XDG_CONFIG_HOME/rclone/rclone.conf
+	if xdgconf != "" {
+		// xdgconf != "" implies xdgcfgdir != ""
+		err := os.MkdirAll(xdgcfgdir, os.ModePerm)
+		if err == nil {
+			return xdgconf
+		}
+	}
+
+	// Try to create $HOME/.rclone.conf
+	if homeconf != "" {
+		return homeconf
+	}
+
+	// Default to ./.rclone.conf (current working directory)
+	fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
+	fs.Errorf(nil, "Defaulting to storing config in current directory.")
+	fs.Errorf(nil, "Use -config flag to workaround.")
+	fs.Errorf(nil, "Error was: %v", err)
+	return hiddenConfigFileName
+}
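+
+// Summarising the search order above: $XDG_CONFIG_HOME/rclone/rclone.conf
+// if it exists, then $HOME/.rclone.conf if it exists, then whichever of
+// those can be created, and finally ./.rclone.conf as a last resort.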
+
+// LoadConfig loads the config file
+func LoadConfig() {
+	// Load configuration file.
+	var err error
+	configData, err = loadConfigFile()
+	if err == errorConfigFileNotFound {
+		fs.Logf(nil, "Config file %q not found - using defaults", ConfigPath)
+		configData, _ = goconfig.LoadFromReader(&bytes.Buffer{})
+	} else if err != nil {
+		log.Fatalf("Failed to load config file %q: %v", ConfigPath, err)
+	} else {
+		fs.Debugf(nil, "Using config file from %q", ConfigPath)
+	}
+
+	// Start the token bucket limiter
+	accounting.StartTokenBucket()
+
+	// Start the bandwidth update ticker
+	accounting.StartTokenTicker()
+
+	// Start the transactions per second limiter
+	fshttp.StartHTTPTokenBucket()
+}
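+
+// Typical startup sequence (a sketch, assuming this package is
+// imported as "config"):
+//
+//	config.LoadConfig() // load (and maybe decrypt) the file, start rate limiters
+//	fmt.Println(config.ConfigPath)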
+
+var errorConfigFileNotFound = errors.New("config file not found")
+
+// loadConfigFile will load a config file, and
+// automatically decrypt it.
+func loadConfigFile() (*goconfig.ConfigFile, error) {
+	b, err := ioutil.ReadFile(ConfigPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, errorConfigFileNotFound
+		}
+		return nil, err
+	}
+
+	// Find first non-empty line
+	r := bufio.NewReader(bytes.NewBuffer(b))
+	for {
+		line, _, err := r.ReadLine()
+		if err != nil {
+			if err == io.EOF {
+				return goconfig.LoadFromReader(bytes.NewBuffer(b))
+			}
+			return nil, err
+		}
+		l := strings.TrimSpace(string(line))
+		if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
+			continue
+		}
+		// First non-empty, non-comment line must be "RCLONE_ENCRYPT_V0:"
+		if l == "RCLONE_ENCRYPT_V0:" {
+			break
+		}
+		if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
+			return nil, errors.New("unsupported configuration encryption - update rclone for support")
+		}
+		return goconfig.LoadFromReader(bytes.NewBuffer(b))
+	}
+
+	// Encrypted content is base64 encoded.
+	dec := base64.NewDecoder(base64.StdEncoding, r)
+	box, err := ioutil.ReadAll(dec)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to load base64 encoded data")
+	}
+	if len(box) < 24+secretbox.Overhead {
+		return nil, errors.New("Configuration data too short")
+	}
+	envpw := os.Getenv("RCLONE_CONFIG_PASS")
+
+	var out []byte
+	for {
+		if len(configKey) == 0 && envpw != "" {
+			err := setConfigPassword(envpw)
+			if err != nil {
+				fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
+			} else {
+				fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
+			}
+		}
+		if len(configKey) == 0 {
+			if !fs.Config.AskPassword {
+				return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
+			}
+			getConfigPassword("Enter configuration password:")
+		}
+
+		// Nonce is first 24 bytes of the ciphertext
+		var nonce [24]byte
+		copy(nonce[:], box[:24])
+		var key [32]byte
+		copy(key[:], configKey[:32])
+
+		// Attempt to decrypt
+		var ok bool
+		out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
+		if ok {
+			break
+		}
+
+		// Retry
+		fs.Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
+		configKey = nil
+		envpw = ""
+	}
+	return goconfig.LoadFromReader(bytes.NewBuffer(out))
+}
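+
+// On disk an encrypted config therefore looks like (reconstructed from
+// the parsing above): a "RCLONE_ENCRYPT_V0:" line followed by the
+// base64 encoding of a 24 byte nonce plus the secretbox ciphertext.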
+
+// checkPassword normalises and validates the password
+func checkPassword(password string) (string, error) {
+	if !utf8.ValidString(password) {
+		return "", errors.New("password contains invalid utf8 characters")
+	}
+	// Check for leading/trailing whitespace
+	trimmedPassword := strings.TrimSpace(password)
+	// Warn user if password has leading or trailing whitespace
+	if len(password) != len(trimmedPassword) {
+		fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
+	}
+	// Normalize to reduce weird variations.
+	password = norm.NFKC.String(password)
+	if len(password) == 0 || len(trimmedPassword) == 0 {
+		return "", errors.New("no characters in password")
+	}
+	return password, nil
+}
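+
+// For example (illustrative): NFKC folds compatibility characters, so a
+// password typed with the single ligature rune "ﬁ" (U+FB01) normalises
+// to the two letters "fi":
+//
+//	pw, err := checkPassword("ﬁsh") // pw == "fish", err == nil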
+
+// GetPassword asks the user for a password with the prompt given.
+func GetPassword(prompt string) string {
+	fmt.Fprintln(os.Stderr, prompt)
+	for {
+		fmt.Fprint(os.Stderr, "password:")
+		password := ReadPassword()
+		password, err := checkPassword(password)
+		if err == nil {
+			return password
+		}
+		fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
+	}
+}
+
+// ChangePassword will query the user twice for the named password. If
+// the same password is entered both times it is returned.
+func ChangePassword(name string) string {
+	for {
+		a := GetPassword(fmt.Sprintf("Enter %s password:", name))
+		b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
+		if a == b {
+			return a
+		}
+		fmt.Println("Passwords do not match!")
+	}
+}
+
+// getConfigPassword will query the user for a password the
+// first time it is required.
+func getConfigPassword(q string) {
+	if len(configKey) != 0 {
+		return
+	}
+	for {
+		password := GetPassword(q)
+		err := setConfigPassword(password)
+		if err == nil {
+			return
+		}
+		fmt.Fprintln(os.Stderr, "Error:", err)
+	}
+}
+
+// setConfigPassword will set the configKey to the hash of
+// the password. If the length of the password is
+// zero after trimming+normalization, an error is returned.
+func setConfigPassword(password string) error {
+	password, err := checkPassword(password)
+	if err != nil {
+		return err
+	}
+	// Create SHA256 hash of the password
+	sha := sha256.New()
+	_, err = sha.Write([]byte("[" + password + "][rclone-config]"))
+	if err != nil {
+		return err
+	}
+	configKey = sha.Sum(nil)
+	return nil
+}
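+
+// The key derivation above is equivalent to this one-liner (a sketch
+// for clarity, not part of the change):
+//
+//	key := sha256.Sum256([]byte("[" + password + "][rclone-config]")) // [32]byte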
+
+// changeConfigPassword will query the user twice
+// for a password. If the same password is entered
+// twice the key is updated.
+func changeConfigPassword() {
+	err := setConfigPassword(ChangePassword("NEW configuration"))
+	if err != nil {
+		fmt.Printf("Failed to set config password: %v\n", err)
+		return
+	}
+}
+
+// SaveConfig saves the configuration file.
+// If configKey has been set, the file will be encrypted.
+func SaveConfig() {
+	dir, name := filepath.Split(ConfigPath)
+	f, err := ioutil.TempFile(dir, name)
+	if err != nil {
+		log.Fatalf("Failed to create temp file for new config: %v", err)
+		return
+	}
+	defer func() {
+		if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
+			fs.Errorf(nil, "Failed to remove temp config file: %v", err)
+		}
+	}()
+
+	var buf bytes.Buffer
+	err = goconfig.SaveConfigData(configData, &buf)
+	if err != nil {
+		log.Fatalf("Failed to save config file: %v", err)
+	}
+
+	if len(configKey) == 0 {
+		if _, err := buf.WriteTo(f); err != nil {
+			log.Fatalf("Failed to write temp config file: %v", err)
+		}
+	} else {
+		fmt.Fprintln(f, "# Encrypted rclone configuration File")
+		fmt.Fprintln(f, "")
+		fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
+
+		// Generate new nonce and write it to the start of the ciphertext
+		var nonce [24]byte
+		n, _ := rand.Read(nonce[:])
+		if n != 24 {
+			log.Fatalf("nonce short read: %d", n)
+		}
+		enc := base64.NewEncoder(base64.StdEncoding, f)
+		_, err = enc.Write(nonce[:])
+		if err != nil {
+			log.Fatalf("Failed to write temp config file: %v", err)
+		}
+
+		var key [32]byte
+		copy(key[:], configKey[:32])
+
+		b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
+		_, err = enc.Write(b)
+		if err != nil {
+			log.Fatalf("Failed to write temp config file: %v", err)
+		}
+		_ = enc.Close()
+	}
+
+	err = f.Close()
+	if err != nil {
+		log.Fatalf("Failed to close config file: %v", err)
+	}
+
+	var fileMode os.FileMode = 0600
+	info, err := os.Stat(ConfigPath)
+	if err != nil {
+		fs.Debugf(nil, "Using default permissions for config file: %v", fileMode)
+	} else if info.Mode() != fileMode {
+		fs.Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode())
+		fileMode = info.Mode()
+	}
+
+	attemptCopyGroup(ConfigPath, f.Name())
+
+	err = os.Chmod(f.Name(), fileMode)
+	if err != nil {
+		fs.Errorf(nil, "Failed to set permissions on config file: %v", err)
+	}
+
+	if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) {
+		log.Fatalf("Failed to move previous config to backup location: %v", err)
+	}
+	if err = os.Rename(f.Name(), ConfigPath); err != nil {
+		log.Fatalf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
+	}
+	if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) {
+		fs.Errorf(nil, "Failed to remove backup config file: %v", err)
+	}
+}
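+
+// Note the atomic-write pattern above: the new contents go to a temp
+// file in the same directory, the old file is renamed to
+// ConfigPath+".old", the temp file is renamed into place, and the
+// backup is removed only once the rename succeeds.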
+
+// SetValueAndSave sets the key to the value and saves just that
+// value in the config file.  It loads the old config file from
+// disk first and overwrites only the given value.
+func SetValueAndSave(name, key, value string) (err error) {
+	// Set the value in config in case we fail to reload it
+	configData.SetValue(name, key, value)
+	// Reload the config file
+	reloadedConfigFile, err := loadConfigFile()
+	if err == errorConfigFileNotFound {
+		// Config file not written yet so ignore reload
+		return nil
+	} else if err != nil {
+		return err
+	}
+	_, err = reloadedConfigFile.GetSection(name)
+	if err != nil {
+		// Section doesn't exist in the reloaded file so return the error
+		return err
+	}
+	// Update the config file with the reloaded version
+	configData = reloadedConfigFile
+	// Set the value in the reloaded version
+	reloadedConfigFile.SetValue(name, key, value)
+	// Save it again
+	SaveConfig()
+	return nil
+}
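+
+// Usage sketch (remote name, key and value are hypothetical):
+//
+//	if err := SetValueAndSave("myremote", "token", newToken); err != nil {
+//		fs.Errorf(nil, "Failed to save token: %v", err)
+//	}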
+
+// ShowRemotes shows an overview of the config file
+func ShowRemotes() {
+	remotes := configData.GetSectionList()
+	if len(remotes) == 0 {
+		return
+	}
+	sort.Strings(remotes)
+	fmt.Printf("%-20s %s\n", "Name", "Type")
+	fmt.Printf("%-20s %s\n", "====", "====")
+	for _, remote := range remotes {
+		fmt.Printf("%-20s %s\n", remote, FileGet(remote, "type"))
+	}
+}
+
+// ChooseRemote chooses a remote name
+func ChooseRemote() string {
+	remotes := configData.GetSectionList()
+	sort.Strings(remotes)
+	return Choose("remote", remotes, nil, false)
+}
+
+// ReadLine reads some input
+var ReadLine = func() string {
+	buf := bufio.NewReader(os.Stdin)
+	line, err := buf.ReadString('\n')
+	if err != nil {
+		log.Fatalf("Failed to read line: %v", err)
+	}
+	return strings.TrimSpace(line)
+}
+
+// Command prints the choices (each prefixed by its shortcut letter) and
+// returns the letter chosen
+func Command(commands []string) byte {
+	opts := []string{}
+	for _, text := range commands {
+		fmt.Printf("%c) %s\n", text[0], text[1:])
+		opts = append(opts, text[:1])
+	}
+	optString := strings.Join(opts, "")
+	optHelp := strings.Join(opts, "/")
+	for {
+		fmt.Printf("%s> ", optHelp)
+		result := strings.ToLower(ReadLine())
+		if len(result) != 1 {
+			continue
+		}
+		i := strings.Index(optString, string(result[0]))
+		if i >= 0 {
+			return result[0]
+		}
+	}
+}
+
+// Confirm asks the user for Yes or No and returns true or false
+func Confirm() bool {
+	if fs.Config.AutoConfirm {
+		return true
+	}
+	return Command([]string{"yYes", "nNo"}) == 'y'
+}
+
+// Choose one of the defaults or type a new string if newOk is set
+func Choose(what string, defaults, help []string, newOk bool) string {
+	valueDescription := "an existing"
+	if newOk {
+		valueDescription = "your own"
+	}
+	fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription)
+	for i, text := range defaults {
+		var lines []string
+		if help != nil {
+			parts := strings.Split(help[i], "\n")
+			lines = append(lines, parts...)
+		}
+		lines = append(lines, fmt.Sprintf("%q", text))
+		pos := i + 1
+		if len(lines) == 1 {
+			fmt.Printf("%2d > %s\n", pos, text)
+		} else {
+			mid := (len(lines) - 1) / 2
+			for i, line := range lines {
+				var sep rune
+				switch i {
+				case 0:
+					sep = '/'
+				case len(lines) - 1:
+					sep = '\\'
+				default:
+					sep = '|'
+				}
+				number := "  "
+				if i == mid {
+					number = fmt.Sprintf("%2d", pos)
+				}
+				fmt.Printf("%s %c %s\n", number, sep, line)
+			}
+		}
+	}
+	for {
+		fmt.Printf("%s> ", what)
+		result := ReadLine()
+		i, err := strconv.Atoi(result)
+		if err != nil {
+			if newOk {
+				return result
+			}
+			for _, v := range defaults {
+				if result == v {
+					return result
+				}
+			}
+			continue
+		}
+		if i >= 1 && i <= len(defaults) {
+			return defaults[i-1]
+		}
+	}
+}
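+
+// A sketch of the resulting prompt for
+// Choose("remote", []string{"s3", "drive"}, nil, false):
+//
+//	Choose a number from below, or type in an existing value
+//	 1 > s3
+//	 2 > drive
+//	remote>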
+
+// ChooseNumber asks the user to enter a number between min and max
+// inclusive, prompting them with what.
+func ChooseNumber(what string, min, max int) int {
+	for {
+		fmt.Printf("%s> ", what)
+		result := ReadLine()
+		i, err := strconv.Atoi(result)
+		if err != nil {
+			fmt.Printf("Bad number: %v\n", err)
+			continue
+		}
+		if i < min || i > max {
+			fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
+			continue
+		}
+		return i
+	}
+}
+
+// ShowRemote shows the contents of the remote
+func ShowRemote(name string) {
+	fmt.Printf("--------------------\n")
+	fmt.Printf("[%s]\n", name)
+	fs := MustFindByName(name)
+	for _, key := range configData.GetKeyList(name) {
+		isPassword := false
+		for _, option := range fs.Options {
+			if option.Name == key && option.IsPassword {
+				isPassword = true
+				break
+			}
+		}
+		value := FileGet(name, key)
+		if isPassword && value != "" {
+			fmt.Printf("%s = *** ENCRYPTED ***\n", key)
+		} else {
+			fmt.Printf("%s = %s\n", key, value)
+		}
+	}
+	fmt.Printf("--------------------\n")
+}
+
+// OkRemote prints the contents of the remote and asks if it is OK
+func OkRemote(name string) bool {
+	ShowRemote(name)
+	switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
+	case 'y':
+		return true
+	case 'e':
+		return false
+	case 'd':
+		configData.DeleteSection(name)
+		return true
+	default:
+		fs.Errorf(nil, "Bad choice %c", i)
+	}
+	return false
+}
+
+// MustFindByName finds the RegInfo for the remote name passed in or
+// exits with a fatal error.
+func MustFindByName(name string) *fs.RegInfo {
+	fsType := FileGet(name, "type")
+	if fsType == "" {
+		log.Fatalf("Couldn't find type of fs for %q", name)
+	}
+	return fs.MustFind(fsType)
+}
+
+// RemoteConfig runs the config helper for the remote if needed
+func RemoteConfig(name string) {
+	fmt.Printf("Remote config\n")
+	f := MustFindByName(name)
+	if f.Config != nil {
+		f.Config(name)
+	}
+}
+
+// ChooseOption asks the user to choose an option
+func ChooseOption(o *fs.Option) string {
+	fmt.Println(o.Help)
+	if o.IsPassword {
+		actions := []string{"yYes type in my own password", "gGenerate random password"}
+		if o.Optional {
+			actions = append(actions, "nNo leave this optional password blank")
+		}
+		var password string
+		switch i := Command(actions); i {
+		case 'y':
+			password = ChangePassword("the")
+		case 'g':
+			for {
+				fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
+				bits := ChooseNumber("Bits", 64, 1024)
+				bytes := bits / 8
+				if bits%8 != 0 {
+					bytes++
+				}
+				var pw = make([]byte, bytes)
+				n, _ := rand.Read(pw)
+				if n != bytes {
+					log.Fatalf("password short read: %d", n)
+				}
+				password = base64.RawURLEncoding.EncodeToString(pw)
+				fmt.Printf("Your password is: %s\n", password)
+				fmt.Printf("Use this password?\n")
+				if Confirm() {
+					break
+				}
+			}
+		case 'n':
+			return ""
+		default:
+			fs.Errorf(nil, "Bad choice %c", i)
+		}
+		return MustObscure(password)
+	}
+	if len(o.Examples) > 0 {
+		var values []string
+		var help []string
+		for _, example := range o.Examples {
+			values = append(values, example.Value)
+			help = append(help, example.Help)
+		}
+		return Choose(o.Name, values, help, true)
+	}
+	fmt.Printf("%s> ", o.Name)
+	return ReadLine()
+}
+
+// UpdateRemote adds the keyValues passed in to the remote of name.
+// keyValues should be key, value pairs.
+func UpdateRemote(name string, keyValues []string) error {
+	if len(keyValues)%2 != 0 {
+		return errors.New("found key without value")
+	}
+	// Set the config
+	for i := 0; i < len(keyValues); i += 2 {
+		configData.SetValue(name, keyValues[i], keyValues[i+1])
+	}
+	RemoteConfig(name)
+	ShowRemote(name)
+	SaveConfig()
+	return nil
+}
+
+// CreateRemote creates a new remote with name, provider and a list of
+// parameters which are key, value pairs.  Any existing configuration
+// for name is deleted first, then the new values are set.
+func CreateRemote(name string, provider string, keyValues []string) error {
+	// Suppress Confirm
+	fs.Config.AutoConfirm = true
+	// Delete the old config if it exists
+	configData.DeleteSection(name)
+	// Set the type
+	configData.SetValue(name, "type", provider)
+	// Show this is automatically configured
+	configData.SetValue(name, ConfigAutomatic, "yes")
+	// Set the remaining values
+	return UpdateRemote(name, keyValues)
+}
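+
+// Usage sketch (all values hypothetical):
+//
+//	err := CreateRemote("myremote", "drive", []string{
+//		"client_id", "xxx",
+//		"client_secret", "yyy",
+//	})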
+
+// PasswordRemote adds the keyValues passed in to the remote of name.
+// keyValues should be key, value pairs.
+func PasswordRemote(name string, keyValues []string) error {
+	if len(keyValues) != 2 {
+		return errors.New("found key without value")
+	}
+	// Suppress Confirm
+	fs.Config.AutoConfirm = true
+	passwd := MustObscure(keyValues[1])
+	if passwd != "" {
+		configData.SetValue(name, keyValues[0], passwd)
+		RemoteConfig(name)
+		ShowRemote(name)
+		SaveConfig()
+	}
+	return nil
+}
+
+// JSONListProviders prints all the providers and options in JSON format
+func JSONListProviders() error {
+	b, err := json.MarshalIndent(fs.Registry, "", "    ")
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal examples")
+	}
+	_, err = os.Stdout.Write(b)
+	if err != nil {
+		return errors.Wrap(err, "failed to write providers list")
+	}
+	return nil
+}
+
+// fsOption returns an Option describing the possible remotes
+func fsOption() *fs.Option {
+	o := &fs.Option{
+		Name: "Storage",
+		Help: "Type of storage to configure.",
+	}
+	for _, item := range fs.Registry {
+		example := fs.OptionExample{
+			Value: item.Name,
+			Help:  item.Description,
+		}
+		o.Examples = append(o.Examples, example)
+	}
+	o.Examples.Sort()
+	return o
+}
+
+// NewRemoteName asks the user for a name for a remote
+func NewRemoteName() (name string) {
+	for {
+		fmt.Printf("name> ")
+		name = ReadLine()
+		parts := fs.Matcher.FindStringSubmatch(name + ":")
+		switch {
+		case name == "":
+			fmt.Printf("Can't use empty name.\n")
+		case driveletter.IsDriveLetter(name):
+			fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
+		case parts == nil:
+			fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
+		default:
+			return name
+		}
+	}
+}
+
+// NewRemote makes a new remote from its name
+func NewRemote(name string) {
+	newType := ChooseOption(fsOption())
+	configData.SetValue(name, "type", newType)
+	fs := fs.MustFind(newType)
+	for _, option := range fs.Options {
+		configData.SetValue(name, option.Name, ChooseOption(&option))
+	}
+	RemoteConfig(name)
+	if OkRemote(name) {
+		SaveConfig()
+		return
+	}
+	EditRemote(fs, name)
+}
+
+// EditRemote gets the user to edit a remote
+func EditRemote(fs *fs.RegInfo, name string) {
+	ShowRemote(name)
+	fmt.Printf("Edit remote\n")
+	for {
+		for _, option := range fs.Options {
+			key := option.Name
+			value := FileGet(name, key)
+			fmt.Printf("Value %q = %q\n", key, value)
+			fmt.Printf("Edit? (y/n)>\n")
+			if Confirm() {
+				newValue := ChooseOption(&option)
+				configData.SetValue(name, key, newValue)
+			}
+		}
+		if OkRemote(name) {
+			break
+		}
+	}
+	SaveConfig()
+	RemoteConfig(name)
+}
+
+// DeleteRemote gets the user to delete a remote
+func DeleteRemote(name string) {
+	configData.DeleteSection(name)
+	SaveConfig()
+}
+
+// copyRemote asks the user for a new remote name and copies name into
+// it. Returns the new name.
+func copyRemote(name string) string {
+	newName := NewRemoteName()
+	// Copy the keys
+	for _, key := range configData.GetKeyList(name) {
+		value := configData.MustValue(name, key, "")
+		configData.SetValue(newName, key, value)
+	}
+	return newName
+}
+
+// RenameRemote renames a config section
+func RenameRemote(name string) {
+	fmt.Printf("Enter new name for %q remote.\n", name)
+	newName := copyRemote(name)
+	if name != newName {
+		configData.DeleteSection(name)
+		SaveConfig()
+	}
+}
+
+// CopyRemote copies a config section
+func CopyRemote(name string) {
+	fmt.Printf("Enter name for copy of %q remote.\n", name)
+	copyRemote(name)
+	SaveConfig()
+}
+
+// ShowConfigLocation prints the location of the config file in use
+func ShowConfigLocation() {
+	if _, err := os.Stat(ConfigPath); os.IsNotExist(err) {
+		fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
+	} else {
+		fmt.Println("Configuration file is stored at:")
+	}
+	fmt.Printf("%s\n", ConfigPath)
+}
+
+// ShowConfig prints the (unencrypted) config options
+func ShowConfig() {
+	var buf bytes.Buffer
+	if err := goconfig.SaveConfigData(configData, &buf); err != nil {
+		log.Fatalf("Failed to serialize config: %v", err)
+	}
+	str := buf.String()
+	if str == "" {
+		str = "; empty config\n"
+	}
+	fmt.Printf("%s", str)
+}
+
+// EditConfig edits the config file interactively
+func EditConfig() {
+	for {
+		haveRemotes := len(configData.GetSectionList()) != 0
+		what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
+		if haveRemotes {
+			fmt.Printf("Current remotes:\n\n")
+			ShowRemotes()
+			fmt.Printf("\n")
+		} else {
+			fmt.Printf("No remotes found - make a new one\n")
+			// take 2nd item and last 2 items of menu list
+			what = append(what[1:2], what[len(what)-2:]...)
+		}
+		switch i := Command(what); i {
+		case 'e':
+			name := ChooseRemote()
+			fs := MustFindByName(name)
+			EditRemote(fs, name)
+		case 'n':
+			NewRemote(NewRemoteName())
+		case 'd':
+			name := ChooseRemote()
+			DeleteRemote(name)
+		case 'r':
+			RenameRemote(ChooseRemote())
+		case 'c':
+			CopyRemote(ChooseRemote())
+		case 's':
+			SetPassword()
+		case 'q':
+			return
+
+		}
+	}
+}
+
+// SetPassword will allow the user to modify the current
+// configuration encryption settings.
+func SetPassword() {
+	for {
+		if len(configKey) > 0 {
+			fmt.Println("Your configuration is encrypted.")
+			what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
+			switch i := Command(what); i {
+			case 'c':
+				changeConfigPassword()
+				SaveConfig()
+				fmt.Println("Password changed")
+				continue
+			case 'u':
+				configKey = nil
+				SaveConfig()
+				continue
+			case 'q':
+				return
+			}
+
+		} else {
+			fmt.Println("Your configuration is not encrypted.")
+			fmt.Println("If you add a password, your login information for cloud services will be protected.")
+			what := []string{"aAdd Password", "qQuit to main menu"}
+			switch i := Command(what); i {
+			case 'a':
+				changeConfigPassword()
+				SaveConfig()
+				fmt.Println("Password set")
+				continue
+			case 'q':
+				return
+			}
+		}
+	}
+}
+
+// Authorize is for remote authorization of headless machines.
+//
+// It expects 1 or 3 arguments
+//
+//   rclone authorize "fs name"
+//   rclone authorize "fs name" "client id" "client secret"
+func Authorize(args []string) {
+	switch len(args) {
+	case 1, 3:
+	default:
+		log.Fatalf("Invalid number of arguments: %d", len(args))
+	}
+	newType := args[0]
+	fs := fs.MustFind(newType)
+	if fs.Config == nil {
+		log.Fatalf("Can't authorize fs %q", newType)
+	}
+	// Name used for temporary fs
+	name := "**temp-fs**"
+
+	// Make sure we delete it
+	defer DeleteRemote(name)
+
+	// Indicate that we want fully automatic configuration.
+	configData.SetValue(name, ConfigAutomatic, "yes")
+	if len(args) == 3 {
+		configData.SetValue(name, ConfigClientID, args[1])
+		configData.SetValue(name, ConfigClientSecret, args[2])
+	}
+	fs.Config(name)
+}
+
+// configToEnv converts a config section and name, eg ("myremote",
+// "ignore-size") into an environment name
+// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
+func configToEnv(section, name string) string {
+	return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
+}
+
+// FileGet gets the config key under section returning the
+// default or empty string if not set.
+//
+// It looks up defaults in the environment if they are present
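+//
+// For example, with a hypothetical remote called "myremote":
+//
+//	// RCLONE_CONFIG_MYREMOTE_USER=alice in the environment
+//	user := FileGet("myremote", "user") // "alice" if the config file has no value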
+func FileGet(section, key string, defaultVal ...string) string {
+	envKey := configToEnv(section, key)
+	newValue, found := os.LookupEnv(envKey)
+	if found {
+		defaultVal = []string{newValue}
+	}
+	return configData.MustValue(section, key, defaultVal...)
+}
+
+// FileGetBool gets the config key under section returning the
+// default or false if not set.
+//
+// It looks up defaults in the environment if they are present
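+//
+// For example, with a hypothetical remote called "myremote":
+//
+//	// RCLONE_CONFIG_MYREMOTE_READ_ONLY=true in the environment
+//	ro := FileGetBool("myremote", "read_only") // true if unset in the config file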
+func FileGetBool(section, key string, defaultVal ...bool) bool {
+	envKey := configToEnv(section, key)
+	newValue, found := os.LookupEnv(envKey)
+	if found {
+		newBool, err := strconv.ParseBool(newValue)
+		if err != nil {
+			fs.Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
+		} else {
+			defaultVal = []bool{newBool}
+		}
+	}
+	return configData.MustBool(section, key, defaultVal...)
+}
+
+// FileGetInt gets the config key under section returning the
+// default or 0 if not set.
+//
+// It looks up defaults in the environment if they are present
+func FileGetInt(section, key string, defaultVal ...int) int {
+	envKey := configToEnv(section, key)
+	newValue, found := os.LookupEnv(envKey)
+	if found {
+		newInt, err := strconv.Atoi(newValue)
+		if err != nil {
+			fs.Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
+		} else {
+			defaultVal = []int{newInt}
+		}
+	}
+	return configData.MustInt(section, key, defaultVal...)
+}
+
+// FileSet sets the key in section to value.  It doesn't save
+// the config file.
+func FileSet(section, key, value string) {
+	configData.SetValue(section, key, value)
+}
+
+// FileDeleteKey deletes the config key in the config file.
+// It returns true if the key was deleted,
+// or returns false if the section or key didn't exist.
+func FileDeleteKey(section, key string) bool {
+	return configData.DeleteKey(section, key)
+}
+
+var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
+
+// FileSections returns the sections in the config file
+// including any defined by environment variables.
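+//
+// For example, RCLONE_CONFIG_MYREMOTE_TYPE=local in the environment
+// defines a section called "myremote".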
+func FileSections() []string {
+	sections := configData.GetSectionList()
+	for _, item := range os.Environ() {
+		matches := matchEnv.FindStringSubmatch(item)
+		if len(matches) == 2 {
+			sections = append(sections, strings.ToLower(matches[1]))
+		}
+	}
+	return sections
+}
+
+// Dump dumps all the config as a JSON file
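+//
+// The output has this shape (remote name purely illustrative):
+//
+//	{
+//	    "myremote": {
+//	        "type": "local"
+//	    }
+//	}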
+func Dump() error {
+	dump := make(map[string]map[string]string)
+	for _, name := range configData.GetSectionList() {
+		params := make(map[string]string)
+		for _, key := range configData.GetKeyList(name) {
+			params[key] = FileGet(name, key)
+		}
+		dump[name] = params
+	}
+	b, err := json.MarshalIndent(dump, "", "    ")
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal config dump")
+	}
+	_, err = os.Stdout.Write(b)
+	if err != nil {
+		return errors.Wrap(err, "failed to write config dump")
+	}
+	return nil
+}
+
+// makeCacheDir returns a directory to use for caching.
+//
+// Code borrowed from go stdlib until it is made public
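+//
+// For example, on Linux with HOME=/home/user and XDG_CACHE_HOME unset
+// it returns "/home/user/.cache/rclone".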
+func makeCacheDir() (dir string) {
+	// Compute default location.
+	switch runtime.GOOS {
+	case "windows":
+		dir = os.Getenv("LocalAppData")
+
+	case "darwin":
+		dir = os.Getenv("HOME")
+		if dir != "" {
+			dir += "/Library/Caches"
+		}
+
+	case "plan9":
+		dir = os.Getenv("home")
+		if dir != "" {
+			// Plan 9 has no established per-user cache directory,
+			// but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
+			dir += "/lib/cache"
+		}
+
+	default: // Unix
+		// https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+		dir = os.Getenv("XDG_CACHE_HOME")
+		if dir == "" {
+			dir = os.Getenv("HOME")
+			if dir != "" {
+				dir += "/.cache"
+			}
+		}
+	}
+
+	// if no dir found then use TempDir - we will have a cachedir!
+	if dir == "" {
+		dir = os.TempDir()
+	}
+	return filepath.Join(dir, "rclone")
+}
diff --git a/fs/config_other.go b/fs/config/config_other.go
similarity index 95%
rename from fs/config_other.go
rename to fs/config/config_other.go
index 7c0834318..e9024a801 100644
--- a/fs/config_other.go
+++ b/fs/config/config_other.go
@@ -3,7 +3,7 @@
 
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
 
-package fs
+package config
 
 // attemptCopyGroups tries to keep the group the same, which only makes sense
 // for system with user-group-world permission model.
diff --git a/fs/config_read_password.go b/fs/config/config_read_password.go
similarity index 97%
rename from fs/config_read_password.go
rename to fs/config/config_read_password.go
index cc1eb609d..aadc2b275 100644
--- a/fs/config_read_password.go
+++ b/fs/config/config_read_password.go
@@ -4,7 +4,7 @@
 
 // +build !solaris,!plan9
 
-package fs
+package config
 
 import (
 	"fmt"
diff --git a/fs/config_read_password_unsupported.go b/fs/config/config_read_password_unsupported.go
similarity index 95%
rename from fs/config_read_password_unsupported.go
rename to fs/config/config_read_password_unsupported.go
index 0666f59fc..eb762448c 100644
--- a/fs/config_read_password_unsupported.go
+++ b/fs/config/config_read_password_unsupported.go
@@ -4,7 +4,7 @@
 
 // +build solaris plan9
 
-package fs
+package config
 
 // ReadPassword reads a password with echoing it to the terminal.
 func ReadPassword() string {
diff --git a/fs/config_test.go b/fs/config/config_test.go
similarity index 63%
rename from fs/config_test.go
rename to fs/config/config_test.go
index 544d51aee..c97ee434e 100644
--- a/fs/config_test.go
+++ b/fs/config/config_test.go
@@ -1,45 +1,15 @@
-package fs
+package config
 
 import (
-	"bytes"
-	"crypto/rand"
 	"io/ioutil"
 	"os"
 	"testing"
 
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func TestObscure(t *testing.T) {
-	for _, test := range []struct {
-		in   string
-		want string
-		iv   string
-	}{
-		{"", "YWFhYWFhYWFhYWFhYWFhYQ", "aaaaaaaaaaaaaaaa"},
-		{"potato", "YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "aaaaaaaaaaaaaaaa"},
-		{"potato", "YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "bbbbbbbbbbbbbbbb"},
-	} {
-		cryptRand = bytes.NewBufferString(test.iv)
-		got, err := Obscure(test.in)
-		cryptRand = rand.Reader
-		assert.NoError(t, err)
-		assert.Equal(t, test.want, got)
-		recoveredIn, err := Reveal(got)
-		assert.NoError(t, err)
-		assert.Equal(t, test.in, recoveredIn, "not bidirectional")
-		// Now the Must variants
-		cryptRand = bytes.NewBufferString(test.iv)
-		got = MustObscure(test.in)
-		cryptRand = rand.Reader
-		assert.Equal(t, test.want, got)
-		recoveredIn = MustReveal(got)
-		assert.Equal(t, test.in, recoveredIn, "not bidirectional")
-
-	}
-}
-
 func TestCRUD(t *testing.T) {
 	configKey = nil // reset password
 	// create temp config file
@@ -54,39 +24,47 @@ func TestCRUD(t *testing.T) {
 
 	// temporarily adapt configuration
 	oldOsStdout := os.Stdout
-	oldConfigFile := configFile
-	oldConfig := Config
+	oldConfigPath := ConfigPath
+	oldConfig := fs.Config
 	oldConfigData := configData
 	oldReadLine := ReadLine
 	os.Stdout = nil
-	configFile = &path
-	Config = &ConfigInfo{}
+	ConfigPath = path
+	fs.Config = &fs.ConfigInfo{}
 	configData = nil
 	defer func() {
 		os.Stdout = oldOsStdout
-		configFile = oldConfigFile
+		ConfigPath = oldConfigPath
 		ReadLine = oldReadLine
-		Config = oldConfig
+		fs.Config = oldConfig
 		configData = oldConfigData
 	}()
 
 	LoadConfig()
 	assert.Equal(t, []string{}, configData.GetSectionList())
 
+	// Fake a remote
+	fs.Register(&fs.RegInfo{Name: "config_test_remote"})
+
 	// add new remote
 	i := 0
 	ReadLine = func() string {
 		answers := []string{
-			"local", // type is local
-			"1",     // yes, disable long filenames
-			"y",     // looks good, save
+			"config_test_remote", // type
+			"y",                  // looks good, save
 		}
 		i = i + 1
 		return answers[i-1]
 	}
+
 	NewRemote("test")
 	assert.Equal(t, []string{"test"}, configData.GetSectionList())
 
+	// Reload the config file to workaround this bug
+	// https://github.com/Unknwon/goconfig/issues/39
+	configData, err = loadConfigFile()
+	require.NoError(t, err)
+
 	// normal rename, test → asdf
 	ReadLine = func() string { return "asdf" }
 	RenameRemote("test")
@@ -226,50 +204,3 @@ func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
 		assert.NotEqual(t, k1, k2)
 	}
 }
-
-func TestDumpFlagsString(t *testing.T) {
-	assert.Equal(t, "", DumpFlags(0).String())
-	assert.Equal(t, "headers", (DumpHeaders).String())
-	assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
-	assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
-	assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
-}
-
-func TestDumpFlagsSet(t *testing.T) {
-	for _, test := range []struct {
-		in      string
-		want    DumpFlags
-		wantErr string
-	}{
-		{"", DumpFlags(0), ""},
-		{"bodies", DumpBodies, ""},
-		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
-		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
-		{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
-		{"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""},
-	} {
-		f := DumpFlags(-1)
-		initial := f
-		err := f.Set(test.in)
-		if err != nil {
-			if test.wantErr == "" {
-				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
-			} else {
-				assert.Contains(t, err.Error(), test.wantErr)
-			}
-			assert.Equal(t, initial, f, test.want)
-		} else {
-			if test.wantErr != "" {
-				t.Errorf("Got no error when expecting one on %q", test.in)
-			} else {
-				assert.Equal(t, test.want, f)
-			}
-		}
-
-	}
-}
-
-func TestDumpFlagsType(t *testing.T) {
-	f := DumpFlags(0)
-	assert.Equal(t, "string", f.Type())
-}
diff --git a/fs/config_unix.go b/fs/config/config_unix.go
similarity index 87%
rename from fs/config_unix.go
rename to fs/config/config_unix.go
index f0fe9daa0..6cda925fd 100644
--- a/fs/config_unix.go
+++ b/fs/config/config_unix.go
@@ -3,13 +3,15 @@
 
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris
 
-package fs
+package config
 
 import (
 	"os"
 	"os/user"
 	"strconv"
 	"syscall"
+
+	"github.com/ncw/rclone/fs"
 )
 
 // attemptCopyGroups tries to keep the group the same. User will be the one
@@ -29,7 +31,7 @@ func attemptCopyGroup(fromPath, toPath string) {
 			}
 		}
 		if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
-			Debugf(nil, "Failed to keep previous owner of config file: %v", err)
+			fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
 		}
 	}
 }
diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go
new file mode 100644
index 000000000..4f6e6993b
--- /dev/null
+++ b/fs/config/configflags/configflags.go
@@ -0,0 +1,162 @@
+// Package configflags defines the flags used by rclone.  It is
+// decoupled into a separate package so it can be replaced.
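+//
+// A typical main wires it up like this (sketch):
+//
+//	configflags.AddFlags(pflag.CommandLine)
+//	pflag.Parse()
+//	configflags.SetFlags()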
+package configflags
+
+// Options set by command line flags
+import (
+	"log"
+	"net"
+	"path/filepath"
+	"strings"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/spf13/pflag"
+)
+
+var (
+	// these will get interpreted into fs.Config via SetFlags() below
+	verbose         int
+	quiet           bool
+	dumpHeaders     bool
+	dumpBodies      bool
+	deleteBefore    bool
+	deleteDuring    bool
+	deleteAfter     bool
+	bindAddr        string
+	disableFeatures string
+)
+
+// AddFlags adds the non-filing-system-specific flags to the command
+func AddFlags(flagSet *pflag.FlagSet) {
+	// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
+	flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
+	flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
+	flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same")
+	flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.")
+	flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
+	flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
+	flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
+	flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum & size, not mod-time & size")
+	flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
+	flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
+	flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
+	flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes")
+	flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout")
+	flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout")
+	flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
+	flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
+	flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
+	flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
+	flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
+	flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
+	flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
+	flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible")
+	flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
+	flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.")
+	flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.")
+	flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
+	flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping; use mod-time or checksum.")
+	flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
+	flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
+	flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
+	flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
+	flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
+	flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
+	flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
+	flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
+	flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
+	flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features.  Use help to see a list.")
+	flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
+	flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.")
+	flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.")
+	flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
+	flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
+	flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
+	flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
+	flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "Buffer size when copying files.")
+	flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
+	flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
+
+}
+
+// SetFlags converts any flags which weren't straightforward into config
+func SetFlags() {
+	fs.Config.LogLevel = fs.LogLevelNotice
+	if verbose >= 2 {
+		fs.Config.LogLevel = fs.LogLevelDebug
+	} else if verbose >= 1 {
+		fs.Config.LogLevel = fs.LogLevelInfo
+	}
+	if quiet {
+		if verbose > 0 {
+			log.Fatalf("Can't set -v and -q")
+		}
+		fs.Config.LogLevel = fs.LogLevelError
+	}
+	logLevelFlag := pflag.Lookup("log-level")
+	if logLevelFlag != nil && logLevelFlag.Changed {
+		if verbose > 0 {
+			log.Fatalf("Can't set -v and --log-level")
+		}
+		if quiet {
+			log.Fatalf("Can't set -q and --log-level")
+		}
+	}
+
+	if dumpHeaders {
+		fs.Config.Dump |= fs.DumpHeaders
+		fs.Infof(nil, "--dump-headers is obsolete - please use --dump headers instead")
+	}
+	if dumpBodies {
+		fs.Config.Dump |= fs.DumpBodies
+		fs.Infof(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
+	}
+
+	switch {
+	case deleteBefore && (deleteDuring || deleteAfter),
+		deleteDuring && deleteAfter:
+		log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
+	case deleteBefore:
+		fs.Config.DeleteMode = fs.DeleteModeBefore
+	case deleteDuring:
+		fs.Config.DeleteMode = fs.DeleteModeDuring
+	case deleteAfter:
+		fs.Config.DeleteMode = fs.DeleteModeAfter
+	default:
+		fs.Config.DeleteMode = fs.DeleteModeDefault
+	}
+
+	if fs.Config.IgnoreSize && fs.Config.SizeOnly {
+		log.Fatalf(`Can't use --size-only and --ignore-size together.`)
+	}
+
+	if fs.Config.Suffix != "" && fs.Config.BackupDir == "" {
+		log.Fatalf(`Can only use --suffix with --backup-dir.`)
+	}
+
+	if bindAddr != "" {
+		addrs, err := net.LookupIP(bindAddr)
+		if err != nil {
+			log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err)
+		}
+		if len(addrs) != 1 {
+			log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
+		}
+		fs.Config.BindAddr = addrs[0]
+	}
+
+	if disableFeatures != "" {
+		if disableFeatures == "help" {
+			log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
+		}
+		fs.Config.DisableFeatures = strings.Split(disableFeatures, ",")
+	}
+
+	// Make the config file absolute
+	configPath, err := filepath.Abs(config.ConfigPath)
+	if err == nil {
+		config.ConfigPath = configPath
+	}
+}
diff --git a/fs/flags.go b/fs/config/flags/flags.go
similarity index 51%
rename from fs/flags.go
rename to fs/config/flags/flags.go
index fac105950..bbb4f6bdd 100644
--- a/fs/flags.go
+++ b/fs/config/flags/flags.go
@@ -1,239 +1,17 @@
-// This contains helper functions for managing flags
-
-package fs
+// Package flags contains enhanced versions of spf13/pflag flag
+// routines which also read from the environment.
+package flags
 
 import (
-	"fmt"
 	"log"
-	"math"
 	"os"
-	"strconv"
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
+	"github.com/ncw/rclone/fs"
 	"github.com/spf13/pflag"
 )
 
-// SizeSuffix is parsed by flag with k/M/G suffixes
-type SizeSuffix int64
-
-// Turn SizeSuffix into a string and a suffix
-func (x SizeSuffix) string() (string, string) {
-	scaled := float64(0)
-	suffix := ""
-	switch {
-	case x < 0:
-		return "off", ""
-	case x == 0:
-		return "0", ""
-	case x < 1024:
-		scaled = float64(x)
-		suffix = ""
-	case x < 1024*1024:
-		scaled = float64(x) / 1024
-		suffix = "k"
-	case x < 1024*1024*1024:
-		scaled = float64(x) / 1024 / 1024
-		suffix = "M"
-	default:
-		scaled = float64(x) / 1024 / 1024 / 1024
-		suffix = "G"
-	}
-	if math.Floor(scaled) == scaled {
-		return fmt.Sprintf("%.0f", scaled), suffix
-	}
-	return fmt.Sprintf("%.3f", scaled), suffix
-}
-
-// String turns SizeSuffix into a string
-func (x SizeSuffix) String() string {
-	val, suffix := x.string()
-	return val + suffix
-}
-
-// Unit turns SizeSuffix into a string with a unit
-func (x SizeSuffix) Unit(unit string) string {
-	val, suffix := x.string()
-	if val == "off" {
-		return val
-	}
-	return val + " " + suffix + unit
-}
-
-// Set a SizeSuffix
-func (x *SizeSuffix) Set(s string) error {
-	if len(s) == 0 {
-		return errors.New("empty string")
-	}
-	if strings.ToLower(s) == "off" {
-		*x = -1
-		return nil
-	}
-	suffix := s[len(s)-1]
-	suffixLen := 1
-	var multiplier float64
-	switch suffix {
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
-		suffixLen = 0
-		multiplier = 1 << 10
-	case 'b', 'B':
-		multiplier = 1
-	case 'k', 'K':
-		multiplier = 1 << 10
-	case 'm', 'M':
-		multiplier = 1 << 20
-	case 'g', 'G':
-		multiplier = 1 << 30
-	default:
-		return errors.Errorf("bad suffix %q", suffix)
-	}
-	s = s[:len(s)-suffixLen]
-	value, err := strconv.ParseFloat(s, 64)
-	if err != nil {
-		return err
-	}
-	if value < 0 {
-		return errors.Errorf("size can't be negative %q", s)
-	}
-	value *= multiplier
-	*x = SizeSuffix(value)
-	return nil
-}
-
-// Type of the value
-func (x *SizeSuffix) Type() string {
-	return "int64"
-}
-
-// Check it satisfies the interface
-var _ pflag.Value = (*SizeSuffix)(nil)
-
-// BwTimeSlot represents a bandwidth configuration at a point in time.
-type BwTimeSlot struct {
-	hhmm      int
-	bandwidth SizeSuffix
-}
-
-// BwTimetable contains all configured time slots.
-type BwTimetable []BwTimeSlot
-
-// String returns a printable representation of BwTimetable.
-func (x BwTimetable) String() string {
-	ret := []string{}
-	for _, ts := range x {
-		ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.hhmm, ts.bandwidth.String()))
-	}
-	return strings.Join(ret, " ")
-}
-
-// Set the bandwidth timetable.
-func (x *BwTimetable) Set(s string) error {
-	// The timetable is formatted as:
-	// "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
-	// If only a single bandwidth identifier is provided, we assume constant bandwidth.
-
-	if len(s) == 0 {
-		return errors.New("empty string")
-	}
-	// Single value without time specification.
-	if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
-		ts := BwTimeSlot{}
-		if err := ts.bandwidth.Set(s); err != nil {
-			return err
-		}
-		ts.hhmm = 0
-		*x = BwTimetable{ts}
-		return nil
-	}
-
-	for _, tok := range strings.Split(s, " ") {
-		tv := strings.Split(tok, ",")
-
-		// Format must be HH:MM,BW
-		if len(tv) != 2 {
-			return errors.Errorf("invalid time/bandwidth specification: %q", tok)
-		}
-
-		// Basic timespec sanity checking
-		hhmm := tv[0]
-		if len(hhmm) != 5 {
-			return errors.Errorf("invalid time specification (hh:mm): %q", hhmm)
-		}
-		hh, err := strconv.Atoi(hhmm[0:2])
-		if err != nil {
-			return errors.Errorf("invalid hour in time specification %q: %v", hhmm, err)
-		}
-		if hh < 0 || hh > 23 {
-			return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
-		}
-		mm, err := strconv.Atoi(hhmm[3:])
-		if err != nil {
-			return errors.Errorf("invalid minute in time specification: %q: %v", hhmm, err)
-		}
-		if mm < 0 || mm > 59 {
-			return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
-		}
-
-		ts := BwTimeSlot{
-			hhmm: (hh * 100) + mm,
-		}
-		// Bandwidth limit for this time slot.
-		if err := ts.bandwidth.Set(tv[1]); err != nil {
-			return err
-		}
-		*x = append(*x, ts)
-	}
-	return nil
-}
-
-// LimitAt returns a BwTimeSlot for the time requested.
-func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
-	// If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
-	if len(x) == 0 {
-		return BwTimeSlot{hhmm: 0, bandwidth: -1}
-	}
-
-	hhmm := tt.Hour()*100 + tt.Minute()
-
-	// By default, we return the last element in the timetable. This
-	// satisfies two conditions: 1) If there's only one element it
-	// will always be selected, and 2) The last element of the table
-	// will "wrap around" until overriden by an earlier time slot.
-	// there's only one time slot in the timetable.
-	ret := x[len(x)-1]
-
-	mindif := 0
-	first := true
-
-	// Look for most recent time slot.
-	for _, ts := range x {
-		// Ignore the past
-		if hhmm < ts.hhmm {
-			continue
-		}
-		dif := ((hhmm / 100 * 60) + (hhmm % 100)) - ((ts.hhmm / 100 * 60) + (ts.hhmm % 100))
-		if first {
-			mindif = dif
-			first = false
-		}
-		if dif <= mindif {
-			mindif = dif
-			ret = ts
-		}
-	}
-
-	return ret
-}
-
-// Type of the value
-func (x BwTimetable) Type() string {
-	return "BwTimetable"
-}
-
-// Check it satisfies the interface
-var _ pflag.Value = (*BwTimetable)(nil)
-
 // optionToEnv converts an option name, eg "ignore-size" into an
 // environment name "RCLONE_IGNORE_SIZE"
 func optionToEnv(name string) string {
@@ -254,7 +32,7 @@ func setDefaultFromEnv(name string) {
 		if err != nil {
 			log.Fatalf("Invalid value for environment variable %q: %v", key, err)
 		}
-		Debugf(nil, "Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
+		fs.Debugf(nil, "Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
 		flag.DefValue = newValue
 	}
 }
@@ -302,6 +80,15 @@ func IntP(name, shorthand string, value int, usage string) (out *int) {
 	return out
 }
 
+// Int64P defines a flag which can be overridden by an environment variable
+//
+// It is a thin wrapper around pflag.Int64P
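+//
+// For example (flag name made up; RCLONE_MY_LIMIT overrides the
+// default):
+//
+//	limit := Int64P("my-limit", "", 0, "An example limit")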
+func Int64P(name, shorthand string, value int64, usage string) (out *int64) {
+	out = pflag.Int64P(name, shorthand, value, usage)
+	setDefaultFromEnv(name)
+	return out
+}
+
 // IntVarP defines a flag which can be overridden by an environment variable
 //
 // It is a thin wrapper around pflag.IntVarP
@@ -360,10 +147,10 @@ func VarP(value pflag.Value, name, shorthand, usage string) {
 	setDefaultFromEnv(name)
 }
 
-// FlagsVarP defines a flag which can be overridden by an environment variable
+// FVarP defines a flag which can be overridden by an environment variable
 //
 // It is a thin wrapper around pflag.VarP
-func FlagsVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) {
+func FVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) {
 	flags.VarP(value, name, shorthand, usage)
 	setDefaultFromEnv(name)
 }
diff --git a/fs/config/obscure.go b/fs/config/obscure.go
new file mode 100644
index 000000000..7db600141
--- /dev/null
+++ b/fs/config/obscure.go
@@ -0,0 +1,95 @@
+// Obscure and Reveal config values
+
+package config
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/base64"
+	"io"
+	"log"
+
+	"github.com/pkg/errors"
+)
+
+// crypt internals
+var (
+	cryptKey = []byte{
+		0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
+		0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
+		0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
+		0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
+	}
+	cryptBlock cipher.Block
+	cryptRand  = rand.Reader
+)
+
+// crypt transforms in to out using iv under AES-CTR.
+//
+// in and out may be the same buffer.
+//
+// Note encryption and decryption are the same operation
+func crypt(out, in, iv []byte) error {
+	if cryptBlock == nil {
+		var err error
+		cryptBlock, err = aes.NewCipher(cryptKey)
+		if err != nil {
+			return err
+		}
+	}
+	stream := cipher.NewCTR(cryptBlock, iv)
+	stream.XORKeyStream(out, in)
+	return nil
+}
+
+// Obscure a value
+//
+// This is done by encrypting with AES-CTR
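+//
+// Reveal is the inverse, for example:
+//
+//	obscured, _ := Obscure("potato") // base64 of iv + AES-CTR ciphertext
+//	plain, _ := Reveal(obscured)     // plain == "potato"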
+func Obscure(x string) (string, error) {
+	plaintext := []byte(x)
+	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
+	iv := ciphertext[:aes.BlockSize]
+	if _, err := io.ReadFull(cryptRand, iv); err != nil {
+		return "", errors.Wrap(err, "failed to read iv")
+	}
+	if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
+		return "", errors.Wrap(err, "encrypt failed")
+	}
+	return base64.RawURLEncoding.EncodeToString(ciphertext), nil
+}
+
+// MustObscure obscures a value, exiting with a fatal error if it failed
+func MustObscure(x string) string {
+	out, err := Obscure(x)
+	if err != nil {
+		log.Fatalf("Obscure failed: %v", err)
+	}
+	return out
+}
+
+// Reveal an obscured value
+func Reveal(x string) (string, error) {
+	ciphertext, err := base64.RawURLEncoding.DecodeString(x)
+	if err != nil {
+		return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
+	}
+	if len(ciphertext) < aes.BlockSize {
+		return "", errors.New("input too short when revealing password - is it obscured?")
+	}
+	buf := ciphertext[aes.BlockSize:]
+	iv := ciphertext[:aes.BlockSize]
+	if err := crypt(buf, buf, iv); err != nil {
+		return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
+	}
+	return string(buf), nil
+}
+
+// MustReveal reveals an obscured value, exiting with a fatal error if it failed
+func MustReveal(x string) string {
+	out, err := Reveal(x)
+	if err != nil {
+		log.Fatalf("Reveal failed: %v", err)
+	}
+	return out
+}
diff --git a/fs/config/obscure_test.go b/fs/config/obscure_test.go
new file mode 100644
index 000000000..90d76e628
--- /dev/null
+++ b/fs/config/obscure_test.go
@@ -0,0 +1,38 @@
+package config
+
+import (
+	"bytes"
+	"crypto/rand"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestObscure(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want string
+		iv   string
+	}{
+		{"", "YWFhYWFhYWFhYWFhYWFhYQ", "aaaaaaaaaaaaaaaa"},
+		{"potato", "YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "aaaaaaaaaaaaaaaa"},
+		{"potato", "YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "bbbbbbbbbbbbbbbb"},
+	} {
+		cryptRand = bytes.NewBufferString(test.iv)
+		got, err := Obscure(test.in)
+		cryptRand = rand.Reader
+		assert.NoError(t, err)
+		assert.Equal(t, test.want, got)
+		recoveredIn, err := Reveal(got)
+		assert.NoError(t, err)
+		assert.Equal(t, test.in, recoveredIn, "not bidirectional")
+		// Now the Must variants
+		cryptRand = bytes.NewBufferString(test.iv)
+		got = MustObscure(test.in)
+		cryptRand = rand.Reader
+		assert.Equal(t, test.want, got)
+		recoveredIn = MustReveal(got)
+		assert.Equal(t, test.in, recoveredIn, "not bidirectional")
+
+	}
+}
diff --git a/fs/testdata/enc-invalid.conf b/fs/config/testdata/enc-invalid.conf
similarity index 100%
rename from fs/testdata/enc-invalid.conf
rename to fs/config/testdata/enc-invalid.conf
diff --git a/fs/testdata/enc-short.conf b/fs/config/testdata/enc-short.conf
similarity index 100%
rename from fs/testdata/enc-short.conf
rename to fs/config/testdata/enc-short.conf
diff --git a/fs/testdata/enc-too-new.conf b/fs/config/testdata/enc-too-new.conf
similarity index 100%
rename from fs/testdata/enc-too-new.conf
rename to fs/config/testdata/enc-too-new.conf
diff --git a/fs/testdata/encrypted.conf b/fs/config/testdata/encrypted.conf
similarity index 100%
rename from fs/testdata/encrypted.conf
rename to fs/config/testdata/encrypted.conf
diff --git a/fs/testdata/plain.conf b/fs/config/testdata/plain.conf
similarity index 100%
rename from fs/testdata/plain.conf
rename to fs/config/testdata/plain.conf
diff --git a/fs/deletemode.go b/fs/deletemode.go
new file mode 100644
index 000000000..9e16373d9
--- /dev/null
+++ b/fs/deletemode.go
@@ -0,0 +1,14 @@
+package fs
+
+// DeleteMode describes the possible delete modes in the config
+type DeleteMode byte
+
+// DeleteMode constants
+const (
+	DeleteModeOff DeleteMode = iota
+	DeleteModeBefore
+	DeleteModeDuring
+	DeleteModeAfter
+	DeleteModeOnly
+	DeleteModeDefault = DeleteModeAfter
+)
diff --git a/fs/direntries.go b/fs/direntries.go
new file mode 100644
index 000000000..b3ae9eaad
--- /dev/null
+++ b/fs/direntries.go
@@ -0,0 +1,81 @@
+package fs
+
+import "fmt"
+
+// DirEntries is a slice of DirEntry, each an Object or a Directory
+type DirEntries []DirEntry
+
+// Len is part of sort.Interface.
+func (ds DirEntries) Len() int {
+	return len(ds)
+}
+
+// Swap is part of sort.Interface.
+func (ds DirEntries) Swap(i, j int) {
+	ds[i], ds[j] = ds[j], ds[i]
+}
+
+// Less is part of sort.Interface.
+func (ds DirEntries) Less(i, j int) bool {
+	return ds[i].Remote() < ds[j].Remote()
+}
+
+// ForObject runs the function supplied on every object in the entries
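+//
+// For example, to total the size of all objects in entries:
+//
+//	var total int64
+//	entries.ForObject(func(o Object) {
+//		total += o.Size()
+//	})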
+func (ds DirEntries) ForObject(fn func(o Object)) {
+	for _, entry := range ds {
+		o, ok := entry.(Object)
+		if ok {
+			fn(o)
+		}
+	}
+}
+
+// ForObjectError runs the function supplied on every object in the
+// entries, stopping at the first error
+func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
+	for _, entry := range ds {
+		o, ok := entry.(Object)
+		if ok {
+			err := fn(o)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// ForDir runs the function supplied on every Directory in the entries
+func (ds DirEntries) ForDir(fn func(dir Directory)) {
+	for _, entry := range ds {
+		dir, ok := entry.(Directory)
+		if ok {
+			fn(dir)
+		}
+	}
+}
+
+// ForDirError runs the function supplied on every Directory in the
+// entries, stopping at the first error
+func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
+	for _, entry := range ds {
+		dir, ok := entry.(Directory)
+		if ok {
+			err := fn(dir)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// DirEntryType returns a string description of the DirEntry, either
+// "object", "directory" or "unknown type XXX"
+func DirEntryType(d DirEntry) string {
+	switch d.(type) {
+	case Object:
+		return "object"
+	case Directory:
+		return "directory"
+	}
+	return fmt.Sprintf("unknown type %T", d)
+}
diff --git a/fs/driveletter.go b/fs/driveletter.go
deleted file mode 100644
index 65b4e2567..000000000
--- a/fs/driveletter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package fs
-
-// isDriveLetter returns a bool indicating whether name is a valid
-// Windows drive letter
-//
-// On non windows platforms we don't have drive letters so we always
-// return false
-func isDriveLetter(name string) bool {
-	return false
-}
diff --git a/fs/driveletter/driveletter.go b/fs/driveletter/driveletter.go
new file mode 100644
index 000000000..322b244a4
--- /dev/null
+++ b/fs/driveletter/driveletter.go
@@ -0,0 +1,14 @@
+// Package driveletter returns whether a name is a valid drive letter
+
+// +build !windows
+
+package driveletter
+
+// IsDriveLetter returns a bool indicating whether name is a valid
+// Windows drive letter
+//
+// On non windows platforms we don't have drive letters so we always
+// return false
+func IsDriveLetter(name string) bool {
+	return false
+}
diff --git a/fs/driveletter_windows.go b/fs/driveletter/driveletter_windows.go
similarity index 56%
rename from fs/driveletter_windows.go
rename to fs/driveletter/driveletter_windows.go
index ee9160a37..7f63b94d7 100644
--- a/fs/driveletter_windows.go
+++ b/fs/driveletter/driveletter_windows.go
@@ -1,10 +1,10 @@
 // +build windows
 
-package fs
+package driveletter
 
-// isDriveLetter returns a bool indicating whether name is a valid
+// IsDriveLetter returns a bool indicating whether name is a valid
 // Windows drive letter
-func isDriveLetter(name string) bool {
+func IsDriveLetter(name string) bool {
 	if len(name) != 1 {
 		return false
 	}
diff --git a/fs/dump.go b/fs/dump.go
new file mode 100644
index 000000000..916628ab0
--- /dev/null
+++ b/fs/dump.go
@@ -0,0 +1,89 @@
+package fs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// DumpFlags describes the Dump options in force
+type DumpFlags int
+
+// DumpFlags definitions
+const (
+	DumpHeaders DumpFlags = 1 << iota
+	DumpBodies
+	DumpRequests
+	DumpResponses
+	DumpAuth
+	DumpFilters
+)
+
+var dumpFlags = []struct {
+	flag DumpFlags
+	name string
+}{
+	{DumpHeaders, "headers"},
+	{DumpBodies, "bodies"},
+	{DumpRequests, "requests"},
+	{DumpResponses, "responses"},
+	{DumpAuth, "auth"},
+	{DumpFilters, "filters"},
+}
+
+// DumpFlagsList is a list of dump flags used in the help
+var DumpFlagsList string
+
+func init() {
+	// calculate the dump flags list
+	var out []string
+	for _, info := range dumpFlags {
+		out = append(out, info.name)
+	}
+	DumpFlagsList = strings.Join(out, ",")
+}
+
+// String turns a DumpFlags into a string
+func (f DumpFlags) String() string {
+	var out []string
+	for _, info := range dumpFlags {
+		if f&info.flag != 0 {
+			out = append(out, info.name)
+			f &^= info.flag
+		}
+	}
+	if f != 0 {
+		out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
+	}
+	return strings.Join(out, ",")
+}
+
+// Set a DumpFlags as a comma separated list of flags
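+//
+// For example:
+//
+//	var f DumpFlags
+//	_ = f.Set("headers,bodies") // f == DumpHeaders|DumpBodies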
+func (f *DumpFlags) Set(s string) error {
+	var flags DumpFlags
+	parts := strings.Split(s, ",")
+	for _, part := range parts {
+		found := false
+		part = strings.ToLower(strings.TrimSpace(part))
+		if part == "" {
+			continue
+		}
+		for _, info := range dumpFlags {
+			if part == info.name {
+				found = true
+				flags |= info.flag
+			}
+		}
+		if !found {
+			return errors.Errorf("Unknown dump flag %q", part)
+		}
+	}
+	*f = flags
+	return nil
+}
+
+// Type of the value
+func (f *DumpFlags) Type() string {
+	return "string"
+}
diff --git a/fs/dump_test.go b/fs/dump_test.go
new file mode 100644
index 000000000..922643353
--- /dev/null
+++ b/fs/dump_test.go
@@ -0,0 +1,58 @@
+package fs
+
+import (
+	"testing"
+
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+)
+
+// Check it satisfies the interface
+var _ pflag.Value = (*DumpFlags)(nil)
+
+func TestDumpFlagsString(t *testing.T) {
+	assert.Equal(t, "", DumpFlags(0).String())
+	assert.Equal(t, "headers", (DumpHeaders).String())
+	assert.Equal(t, "headers,bodies", (DumpHeaders | DumpBodies).String())
+	assert.Equal(t, "headers,bodies,requests,responses,auth,filters", (DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters).String())
+	assert.Equal(t, "headers,Unknown-0x8000", (DumpHeaders | DumpFlags(0x8000)).String())
+}
+
+func TestDumpFlagsSet(t *testing.T) {
+	for _, test := range []struct {
+		in      string
+		want    DumpFlags
+		wantErr string
+	}{
+		{"", DumpFlags(0), ""},
+		{"bodies", DumpBodies, ""},
+		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
+		{"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""},
+		{"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""},
+		{"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""},
+	} {
+		f := DumpFlags(-1)
+		initial := f
+		err := f.Set(test.in)
+		if err != nil {
+			if test.wantErr == "" {
+				t.Errorf("Got an error when not expecting one on %q: %v", test.in, err)
+			} else {
+				assert.Contains(t, err.Error(), test.wantErr)
+			}
+			assert.Equal(t, initial, f, test.want)
+		} else {
+			if test.wantErr != "" {
+				t.Errorf("Got no error when expecting one on %q", test.in)
+			} else {
+				assert.Equal(t, test.want, f)
+			}
+		}
+
+	}
+}
+
+func TestDumpFlagsType(t *testing.T) {
+	f := DumpFlags(0)
+	assert.Equal(t, "string", f.Type())
+}
diff --git a/fs/filter.go b/fs/filter/filter.go
similarity index 59%
rename from fs/filter.go
rename to fs/filter/filter.go
index ad4221cfa..78a6b4058 100644
--- a/fs/filter.go
+++ b/fs/filter/filter.go
@@ -1,43 +1,22 @@
-// Control the filtering of files
-
-package fs
+// Package filter controls the filtering of files
+package filter
 
 import (
 	"bufio"
 	"fmt"
+	"log"
 	"os"
 	"path"
 	"regexp"
-	"strconv"
 	"strings"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
 )
 
-// Global
-var (
-	// Flags
-	deleteExcluded = BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
-	filterRule     = StringArrayP("filter", "f", nil, "Add a file-filtering rule")
-	filterFrom     = StringArrayP("filter-from", "", nil, "Read filtering patterns from a file")
-	excludeRule    = StringArrayP("exclude", "", nil, "Exclude files matching pattern")
-	excludeFrom    = StringArrayP("exclude-from", "", nil, "Read exclude patterns from file")
-	excludeFile    = StringP("exclude-if-present", "", "", "Exclude directories if filename is present")
-	includeRule    = StringArrayP("include", "", nil, "Include files matching pattern")
-	includeFrom    = StringArrayP("include-from", "", nil, "Read include patterns from file")
-	filesFrom      = StringArrayP("files-from", "", nil, "Read list of source-file names from file")
-	minAge         = StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
-	maxAge         = StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
-	minSize        = SizeSuffix(-1)
-	maxSize        = SizeSuffix(-1)
-	//cvsExclude     = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
-)
-
-func init() {
-	VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
-	VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
-}
+// Active is the globally active filter
+var Active = mustNewFilter(nil)
 
 // rule is one filter rule
 type rule struct {
@@ -96,167 +75,137 @@ func (rs *rules) len() int {
 // FilesMap describes the map of files to transfer
 type FilesMap map[string]struct{}
 
+// Opt configures the filter
+type Opt struct {
+	DeleteExcluded bool
+	FilterRule     []string
+	FilterFrom     []string
+	ExcludeRule    []string
+	ExcludeFrom    []string
+	ExcludeFile    string
+	IncludeRule    []string
+	IncludeFrom    []string
+	FilesFrom      []string
+	MinAge         fs.Duration
+	MaxAge         fs.Duration
+	MinSize        fs.SizeSuffix
+	MaxSize        fs.SizeSuffix
+}
+
+const unusedAge = fs.Duration((1 << 63) - 1)
+
+// DefaultOpt is the default config for the filter
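+//
+// A caller might copy and tweak it like this (sketch):
+//
+//	opt := DefaultOpt
+//	opt.MinSize = fs.SizeSuffix(1024) // skip files under 1 KiB
+//	opt.ExcludeRule = []string{"*.bak"}
+//	f, err := NewFilter(&opt)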
+var DefaultOpt = Opt{
+	MinAge:  unusedAge,
+	MaxAge:  unusedAge,
+	MinSize: fs.SizeSuffix(-1),
+	MaxSize: fs.SizeSuffix(-1),
+}
+
 // Filter describes any filtering in operation
 type Filter struct {
-	DeleteExcluded bool
-	MinSize        int64
-	MaxSize        int64
-	ModTimeFrom    time.Time
-	ModTimeTo      time.Time
-	fileRules      rules
-	dirRules       rules
-	ExcludeFile    string
-	files          FilesMap // files if filesFrom
-	dirs           FilesMap // dirs from filesFrom
+	Opt         Opt
+	ModTimeFrom time.Time
+	ModTimeTo   time.Time
+	fileRules   rules
+	dirRules    rules
+	files       FilesMap // files if filesFrom
+	dirs        FilesMap // dirs from filesFrom
 }
 
-// We use time conventions
-var ageSuffixes = []struct {
-	Suffix     string
-	Multiplier time.Duration
-}{
-	{Suffix: "ms", Multiplier: time.Millisecond},
-	{Suffix: "s", Multiplier: time.Second},
-	{Suffix: "m", Multiplier: time.Minute},
-	{Suffix: "h", Multiplier: time.Hour},
-	{Suffix: "d", Multiplier: time.Hour * 24},
-	{Suffix: "w", Multiplier: time.Hour * 24 * 7},
-	{Suffix: "M", Multiplier: time.Hour * 24 * 30},
-	{Suffix: "y", Multiplier: time.Hour * 24 * 365},
+// NewFilter parses the command line options and creates a Filter
+// object.  If opt is nil, then DefaultOpt will be used
+func NewFilter(opt *Opt) (f *Filter, err error) {
+	f = &Filter{}
 
-	// Default to second
-	{Suffix: "", Multiplier: time.Second},
-}
+	// Make a copy of the options
+	if opt != nil {
+		f.Opt = *opt
+	} else {
+		f.Opt = DefaultOpt
+	}
 
-// ParseDuration parses a duration string. Accept ms|s|m|h|d|w|M|y suffixes. Defaults to second if not provided
-func ParseDuration(age string) (time.Duration, error) {
-	var period float64
-
-	for _, ageSuffix := range ageSuffixes {
-		if strings.HasSuffix(age, ageSuffix.Suffix) {
-			numberString := age[:len(age)-len(ageSuffix.Suffix)]
-			var err error
-			period, err = strconv.ParseFloat(numberString, 64)
-			if err != nil {
-				return time.Duration(0), err
-			}
-			period *= float64(ageSuffix.Multiplier)
-			break
+	// Filter flags
+	if f.Opt.MinAge != unusedAge {
+		f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
+		fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
+	}
+	if f.Opt.MaxAge != unusedAge {
+		f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
+		if !f.ModTimeTo.IsZero() && f.ModTimeFrom.Before(f.ModTimeTo) {
+			log.Fatal("filter: --min-age can't be larger than --max-age")
 		}
+		fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
 	}
 
-	return time.Duration(period), nil
-}
-
-// NewFilter parses the command line options and creates a Filter object
-func NewFilter() (f *Filter, err error) {
-	f = &Filter{
-		DeleteExcluded: *deleteExcluded,
-		MinSize:        int64(minSize),
-		MaxSize:        int64(maxSize),
-	}
 	addImplicitExclude := false
 	foundExcludeRule := false
 
-	if includeRule != nil {
-		for _, rule := range *includeRule {
-			err = f.Add(true, rule)
-			if err != nil {
-				return nil, err
-			}
-			addImplicitExclude = true
+	for _, rule := range f.Opt.IncludeRule {
+		err = f.Add(true, rule)
+		if err != nil {
+			return nil, err
 		}
+		addImplicitExclude = true
 	}
-	if includeFrom != nil {
-		for _, rule := range *includeFrom {
-			err := forEachLine(rule, func(line string) error {
-				return f.Add(true, line)
-			})
-			if err != nil {
-				return nil, err
-			}
-			addImplicitExclude = true
+	for _, rule := range f.Opt.IncludeFrom {
+		err := forEachLine(rule, func(line string) error {
+			return f.Add(true, line)
+		})
+		if err != nil {
+			return nil, err
 		}
+		addImplicitExclude = true
 	}
-	if excludeRule != nil {
-		for _, rule := range *excludeRule {
-			err = f.Add(false, rule)
-			if err != nil {
-				return nil, err
-			}
-			foundExcludeRule = true
+	for _, rule := range f.Opt.ExcludeRule {
+		err = f.Add(false, rule)
+		if err != nil {
+			return nil, err
 		}
+		foundExcludeRule = true
 	}
-	if excludeFrom != nil {
-		for _, rule := range *excludeFrom {
-			err := forEachLine(rule, func(line string) error {
-				return f.Add(false, line)
-			})
-			if err != nil {
-				return nil, err
-			}
-			foundExcludeRule = true
+	for _, rule := range f.Opt.ExcludeFrom {
+		err := forEachLine(rule, func(line string) error {
+			return f.Add(false, line)
+		})
+		if err != nil {
+			return nil, err
 		}
+		foundExcludeRule = true
 	}
 
 	if addImplicitExclude && foundExcludeRule {
-		Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
+		fs.Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
 	}
 
-	if filterRule != nil {
-		for _, rule := range *filterRule {
-			err = f.AddRule(rule)
-			if err != nil {
-				return nil, err
-			}
+	for _, rule := range f.Opt.FilterRule {
+		err = f.AddRule(rule)
+		if err != nil {
+			return nil, err
 		}
 	}
-	if filterFrom != nil {
-		for _, rule := range *filterFrom {
-			err := forEachLine(rule, f.AddRule)
-			if err != nil {
-				return nil, err
-			}
+	for _, rule := range f.Opt.FilterFrom {
+		err := forEachLine(rule, f.AddRule)
+		if err != nil {
+			return nil, err
 		}
 	}
-	if filesFrom != nil {
-		for _, rule := range *filesFrom {
-			f.initAddFile() // init to show --files-from set even if no files within
-			err := forEachLine(rule, func(line string) error {
-				return f.AddFile(line)
-			})
-			if err != nil {
-				return nil, err
-			}
+	for _, rule := range f.Opt.FilesFrom {
+		f.initAddFile() // init to show --files-from set even if no files within
+		err := forEachLine(rule, func(line string) error {
+			return f.AddFile(line)
+		})
+		if err != nil {
+			return nil, err
 		}
 	}
-	f.ExcludeFile = *excludeFile
 	if addImplicitExclude {
 		err = f.Add(false, "/**")
 		if err != nil {
 			return nil, err
 		}
 	}
-	if *minAge != "" {
-		duration, err := ParseDuration(*minAge)
-		if err != nil {
-			return nil, err
-		}
-		f.ModTimeTo = time.Now().Add(-duration)
-		Debugf(nil, "--min-age %v to %v", duration, f.ModTimeTo)
-	}
-	if *maxAge != "" {
-		duration, err := ParseDuration(*maxAge)
-		if err != nil {
-			return nil, err
-		}
-		f.ModTimeFrom = time.Now().Add(-duration)
-		if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
-			return nil, errors.New("argument --min-age can't be larger than --max-age")
-		}
-		Debugf(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
-	}
-	if Config.Dump&DumpFilters != 0 {
+	if fs.Config.Dump&fs.DumpFilters != 0 {
 		fmt.Println("--- start filters ---")
 		fmt.Println(f.DumpFilters())
 		fmt.Println("--- end filters ---")
@@ -264,6 +213,14 @@ func NewFilter() (f *Filter, err error) {
 	return f, nil
 }
 
+func mustNewFilter(opt *Opt) *Filter {
+	f, err := NewFilter(opt)
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
+
 // addDirGlobs adds directory globs from the file glob passed in
 func (f *Filter) addDirGlobs(Include bool, glob string) error {
 	for _, dirGlob := range globToDirGlobs(glob) {
@@ -379,11 +336,11 @@ func (f *Filter) InActive() bool {
 	return (f.files == nil &&
 		f.ModTimeFrom.IsZero() &&
 		f.ModTimeTo.IsZero() &&
-		f.MinSize < 0 &&
-		f.MaxSize < 0 &&
+		f.Opt.MinSize < 0 &&
+		f.Opt.MaxSize < 0 &&
 		f.fileRules.len() == 0 &&
 		f.dirRules.len() == 0 &&
-		len(f.ExcludeFile) == 0)
+		len(f.Opt.ExcludeFile) == 0)
 }
 
 // includeRemote returns whether this remote passes the filter rules.
@@ -397,15 +354,15 @@ func (f *Filter) includeRemote(remote string) bool {
 }
 
 // ListContainsExcludeFile checks if the exclude file is present in the list.
-func (f *Filter) ListContainsExcludeFile(entries DirEntries) bool {
-	if len(f.ExcludeFile) == 0 {
+func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
+	if len(f.Opt.ExcludeFile) == 0 {
 		return false
 	}
 	for _, entry := range entries {
-		obj, ok := entry.(Object)
+		obj, ok := entry.(fs.Object)
 		if ok {
 			basename := path.Base(obj.Remote())
-			if basename == f.ExcludeFile {
+			if basename == f.Opt.ExcludeFile {
 				return true
 			}
 		}
@@ -415,7 +372,7 @@ func (f *Filter) ListContainsExcludeFile(entries DirEntries) bool {
 
 // IncludeDirectory returns a function which checks whether this
 // directory should be included in the sync or not.
-func (f *Filter) IncludeDirectory(fs Fs) func(string) (bool, error) {
+func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
 	return func(remote string) (bool, error) {
 		remote = strings.Trim(remote, "/")
 		// first check if we need to remove directory based on
@@ -447,9 +404,9 @@ func (f *Filter) IncludeDirectory(fs Fs) func(string) (bool, error) {
 // DirContainsExcludeFile checks if the exclude file is present in a
 // directory. If fs is nil, it works properly if ExcludeFile is an
 // empty string (for testing).
-func (f *Filter) DirContainsExcludeFile(fs Fs, remote string) (bool, error) {
-	if len(Config.Filter.ExcludeFile) > 0 {
-		exists, err := FileExists(fs, path.Join(remote, Config.Filter.ExcludeFile))
+func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
+	if len(f.Opt.ExcludeFile) > 0 {
+		exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
 		if err != nil {
 			return false, err
 		}
@@ -474,10 +431,10 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
 	if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
 		return false
 	}
-	if f.MinSize >= 0 && size < f.MinSize {
+	if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
 		return false
 	}
-	if f.MaxSize >= 0 && size > f.MaxSize {
+	if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
 		return false
 	}
 	return f.includeRemote(remote)
@@ -486,7 +443,7 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
 // IncludeObject returns whether this object should be included into
 // the sync or not. This is a convenience function to avoid calling
 // o.ModTime(), which is an expensive operation.
-func (f *Filter) IncludeObject(o Object) bool {
+func (f *Filter) IncludeObject(o fs.Object) bool {
 	var modTime time.Time
 
 	if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
@@ -506,7 +463,7 @@ func forEachLine(path string, fn func(string) error) (err error) {
 	if err != nil {
 		return err
 	}
-	defer CheckClose(in, &err)
+	defer fs.CheckClose(in, &err)
 	scanner := bufio.NewScanner(in)
 	for scanner.Scan() {
 		line := scanner.Text()
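
To make the new construction pattern concrete, here is a minimal sketch (not part of the patch itself) of building a `Filter` from an `Opt` value instead of the old package-level flag variables; the exclude rule is illustrative only.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncw/rclone/fs/filter"
    )

    func main() {
    	// Start from the defaults, as the tests in this patch do.
    	opt := filter.DefaultOpt
    	opt.ExcludeRule = []string{"*.bak"} // illustrative rule
    	f, err := filter.NewFilter(&opt)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(f.DumpFilters())
    }
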
diff --git a/fs/filter_test.go b/fs/filter/filter_test.go
similarity index 79%
rename from fs/filter_test.go
rename to fs/filter/filter_test.go
index 363448a9a..1ff52e1db 100644
--- a/fs/filter_test.go
+++ b/fs/filter/filter_test.go
@@ -1,4 +1,4 @@
-package fs
+package filter
 
 import (
 	"fmt"
@@ -8,47 +8,17 @@ import (
 	"testing"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-func TestAgeSuffix(t *testing.T) {
-	for _, test := range []struct {
-		in   string
-		want float64
-		err  bool
-	}{
-		{"0", 0, false},
-		{"", 0, true},
-		{"1ms", float64(time.Millisecond), false},
-		{"1s", float64(time.Second), false},
-		{"1m", float64(time.Minute), false},
-		{"1h", float64(time.Hour), false},
-		{"1d", float64(time.Hour) * 24, false},
-		{"1w", float64(time.Hour) * 24 * 7, false},
-		{"1M", float64(time.Hour) * 24 * 30, false},
-		{"1y", float64(time.Hour) * 24 * 365, false},
-		{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
-		{"-1s", -float64(time.Second), false},
-		{"1.s", float64(time.Second), false},
-		{"1x", 0, true},
-	} {
-		duration, err := ParseDuration(test.in)
-		if test.err {
-			require.Error(t, err)
-		} else {
-			require.NoError(t, err)
-		}
-		assert.Equal(t, test.want, float64(duration))
-	}
-}
-
 func TestNewFilterDefault(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
-	assert.False(t, f.DeleteExcluded)
-	assert.Equal(t, int64(-1), f.MinSize)
-	assert.Equal(t, int64(-1), f.MaxSize)
+	assert.False(t, f.Opt.DeleteExcluded)
+	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MinSize)
+	assert.Equal(t, fs.SizeSuffix(-1), f.Opt.MaxSize)
 	assert.Len(t, f.fileRules.rules, 0)
 	assert.Len(t, f.dirRules.rules, 0)
 	assert.Nil(t, f.files)
@@ -70,22 +40,22 @@ func testFile(t *testing.T, contents string) string {
 }
 
 func TestNewFilterFull(t *testing.T) {
-	mins := int64(100 * 1024)
-	maxs := int64(1000 * 1024)
-	isFalse := false
-	isTrue := true
+	Opt := DefaultOpt
+
+	mins := fs.SizeSuffix(100 * 1024)
+	maxs := fs.SizeSuffix(1000 * 1024)
 
 	// Set up the input
-	deleteExcluded = &isTrue
-	filterRule = &[]string{"- filter1", "- filter1b"}
-	filterFrom = &[]string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
-	excludeRule = &[]string{"exclude1"}
-	excludeFrom = &[]string{testFile(t, "#comment\nexclude2\nexclude3\n")}
-	includeRule = &[]string{"include1"}
-	includeFrom = &[]string{testFile(t, "#comment\ninclude2\ninclude3\n")}
-	filesFrom = &[]string{testFile(t, "#comment\nfiles1\nfiles2\n")}
-	minSize = SizeSuffix(mins)
-	maxSize = SizeSuffix(maxs)
+	Opt.DeleteExcluded = true
+	Opt.FilterRule = []string{"- filter1", "- filter1b"}
+	Opt.FilterFrom = []string{testFile(t, "#comment\n+ filter2\n- filter3\n")}
+	Opt.ExcludeRule = []string{"exclude1"}
+	Opt.ExcludeFrom = []string{testFile(t, "#comment\nexclude2\nexclude3\n")}
+	Opt.IncludeRule = []string{"include1"}
+	Opt.IncludeFrom = []string{testFile(t, "#comment\ninclude2\ninclude3\n")}
+	Opt.FilesFrom = []string{testFile(t, "#comment\nfiles1\nfiles2\n")}
+	Opt.MinSize = mins
+	Opt.MaxSize = maxs
 
 	rm := func(p string) {
 		err := os.Remove(p)
@@ -95,27 +65,17 @@ func TestNewFilterFull(t *testing.T) {
 	}
 	// Reset the input
 	defer func() {
-		rm((*filterFrom)[0])
-		rm((*excludeFrom)[0])
-		rm((*includeFrom)[0])
-		rm((*filesFrom)[0])
-		minSize = -1
-		maxSize = -1
-		deleteExcluded = &isFalse
-		filterRule = nil
-		filterFrom = nil
-		excludeRule = nil
-		excludeFrom = nil
-		includeRule = nil
-		includeFrom = nil
-		filesFrom = nil
+		rm(Opt.FilterFrom[0])
+		rm(Opt.ExcludeFrom[0])
+		rm(Opt.IncludeFrom[0])
+		rm(Opt.FilesFrom[0])
 	}()
 
-	f, err := NewFilter()
+	f, err := NewFilter(&Opt)
 	require.NoError(t, err)
-	assert.True(t, f.DeleteExcluded)
-	assert.Equal(t, f.MinSize, mins)
-	assert.Equal(t, f.MaxSize, maxs)
+	assert.True(t, f.Opt.DeleteExcluded)
+	assert.Equal(t, f.Opt.MinSize, mins)
+	assert.Equal(t, f.Opt.MaxSize, maxs)
 	got := f.DumpFilters()
 	want := `--- File filter rules ---
 + (^|/)include1$
@@ -153,7 +113,7 @@ type includeTest struct {
 func testInclude(t *testing.T, f *Filter, tests []includeTest) {
 	for _, test := range tests {
 		got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
-		assert.Equal(t, test.want, got, test.in, test.size, test.modTime)
+		assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, time.Unix(test.modTime, 0)))
 	}
 }
 
@@ -171,7 +131,7 @@ func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
 }
 
 func TestNewFilterIncludeFiles(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	err = f.AddFile("file1.jpg")
 	require.NoError(t, err)
@@ -192,7 +152,7 @@ func TestNewFilterIncludeFiles(t *testing.T) {
 }
 
 func TestNewFilterIncludeFilesDirs(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	for _, path := range []string{
 		"path/to/dir/file1.png",
@@ -224,9 +184,9 @@ func TestNewFilterIncludeFilesDirs(t *testing.T) {
 }
 
 func TestNewFilterMinSize(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
-	f.MinSize = 100
+	f.Opt.MinSize = 100
 	testInclude(t, f, []includeTest{
 		{"file1.jpg", 100, 0, true},
 		{"file2.jpg", 101, 0, true},
@@ -236,9 +196,9 @@ func TestNewFilterMinSize(t *testing.T) {
 }
 
 func TestNewFilterMaxSize(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
-	f.MaxSize = 100
+	f.Opt.MaxSize = 100
 	testInclude(t, f, []includeTest{
 		{"file1.jpg", 100, 0, true},
 		{"file2.jpg", 101, 0, false},
@@ -248,7 +208,7 @@ func TestNewFilterMaxSize(t *testing.T) {
 }
 
 func TestNewFilterMinAndMaxAge(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	f.ModTimeFrom = time.Unix(1440000002, 0)
 	f.ModTimeTo = time.Unix(1440000003, 0)
@@ -263,7 +223,7 @@ func TestNewFilterMinAndMaxAge(t *testing.T) {
 }
 
 func TestNewFilterMinAge(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	f.ModTimeTo = time.Unix(1440000002, 0)
 	testInclude(t, f, []includeTest{
@@ -277,7 +237,7 @@ func TestNewFilterMinAge(t *testing.T) {
 }
 
 func TestNewFilterMaxAge(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	f.ModTimeFrom = time.Unix(1440000002, 0)
 	testInclude(t, f, []includeTest{
@@ -291,7 +251,7 @@ func TestNewFilterMaxAge(t *testing.T) {
 }
 
 func TestNewFilterMatches(t *testing.T) {
-	f, err := NewFilter()
+	f, err := NewFilter(nil)
 	require.NoError(t, err)
 	add := func(s string) {
 		err := f.AddRule(s)
@@ -396,7 +356,7 @@ func TestFilterAddDirRuleOrFileRule(t *testing.T) {
 + (^|/)a/$`,
 		},
 	} {
-		f, err := NewFilter()
+		f, err := NewFilter(nil)
 		require.NoError(t, err)
 		err = f.Add(test.included, test.glob)
 		require.NoError(t, err)
@@ -464,7 +424,7 @@ func TestFilterMatchesFromDocs(t *testing.T) {
 		{"\\\\.jpg", true, "\\.jpg"},
 		{"\\[one\\].jpg", true, "[one].jpg"},
 	} {
-		f, err := NewFilter()
+		f, err := NewFilter(nil)
 		require.NoError(t, err)
 		err = f.Add(true, test.glob)
 		require.NoError(t, err)
diff --git a/fs/filter/filterflags/filterflags.go b/fs/filter/filterflags/filterflags.go
new file mode 100644
index 000000000..a25da9145
--- /dev/null
+++ b/fs/filter/filterflags/filterflags.go
@@ -0,0 +1,31 @@
+// Package filterflags implements command line flags to set up a filter
+package filterflags
+
+import (
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/spf13/pflag"
+)
+
+// Options set by command line flags
+var (
+	Opt = filter.DefaultOpt
+)
+
+// AddFlags adds the filtering flags to the flagSet passed in
+func AddFlags(flagSet *pflag.FlagSet) {
+	flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
+	flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
+	flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")
+	flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, "exclude", "", nil, "Exclude files matching pattern")
+	flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, "exclude-from", "", nil, "Read exclude patterns from file")
+	flags.StringVarP(flagSet, &Opt.ExcludeFile, "exclude-if-present", "", "", "Exclude directories if filename is present")
+	flags.StringArrayVarP(flagSet, &Opt.IncludeRule, "include", "", nil, "Include files matching pattern")
+	flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, "include-from", "", nil, "Read include patterns from file")
+	flags.StringArrayVarP(flagSet, &Opt.FilesFrom, "files-from", "", nil, "Read list of source-file names from file")
+	flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
+	flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
+	flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
+	flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
+	//cvsExclude     = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
+}
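
A sketch (again, not in the patch) of how a command would wire these flags up: register them on a `pflag.FlagSet`, parse, then build the filter from `filterflags.Opt`. The flag values here are examples only.

    package main

    import (
    	"fmt"

    	"github.com/ncw/rclone/fs/filter"
    	"github.com/ncw/rclone/fs/filter/filterflags"
    	"github.com/spf13/pflag"
    )

    func main() {
    	flagSet := pflag.NewFlagSet("example", pflag.ExitOnError)
    	filterflags.AddFlags(flagSet)
    	// Example arguments - in rclone proper these come from the command line.
    	if err := flagSet.Parse([]string{"--include", "*.jpg", "--max-size", "10M"}); err != nil {
    		panic(err)
    	}
    	f, err := filter.NewFilter(&filterflags.Opt)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(f.DumpFilters())
    }
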
diff --git a/fs/glob.go b/fs/filter/glob.go
similarity index 99%
rename from fs/glob.go
rename to fs/filter/glob.go
index 66f2f58eb..84c03f484 100644
--- a/fs/glob.go
+++ b/fs/filter/glob.go
@@ -1,6 +1,6 @@
 // rsync style glob parser
 
-package fs
+package filter
 
 import (
 	"bytes"
diff --git a/fs/glob_test.go b/fs/filter/glob_test.go
similarity index 99%
rename from fs/glob_test.go
rename to fs/filter/glob_test.go
index 81d53389c..f9736978a 100644
--- a/fs/glob_test.go
+++ b/fs/filter/glob_test.go
@@ -1,4 +1,4 @@
-package fs
+package filter
 
 import (
 	"testing"
diff --git a/fs/flags_test.go b/fs/flags_test.go
deleted file mode 100644
index 08589f445..000000000
--- a/fs/flags_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package fs
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestSizeSuffixString(t *testing.T) {
-	for _, test := range []struct {
-		in   float64
-		want string
-	}{
-		{0, "0"},
-		{102, "102"},
-		{1024, "1k"},
-		{1024 * 1024, "1M"},
-		{1024 * 1024 * 1024, "1G"},
-		{10 * 1024 * 1024 * 1024, "10G"},
-		{10.1 * 1024 * 1024 * 1024, "10.100G"},
-		{-1, "off"},
-		{-100, "off"},
-	} {
-		ss := SizeSuffix(test.in)
-		got := ss.String()
-		assert.Equal(t, test.want, got)
-	}
-}
-
-func TestSizeSuffixUnit(t *testing.T) {
-	for _, test := range []struct {
-		in   float64
-		want string
-	}{
-		{0, "0 Bytes"},
-		{102, "102 Bytes"},
-		{1024, "1 kBytes"},
-		{1024 * 1024, "1 MBytes"},
-		{1024 * 1024 * 1024, "1 GBytes"},
-		{10 * 1024 * 1024 * 1024, "10 GBytes"},
-		{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
-		{-1, "off"},
-		{-100, "off"},
-	} {
-		ss := SizeSuffix(test.in)
-		got := ss.Unit("Bytes")
-		assert.Equal(t, test.want, got)
-	}
-}
-
-func TestSizeSuffixSet(t *testing.T) {
-	for _, test := range []struct {
-		in   string
-		want int64
-		err  bool
-	}{
-		{"0", 0, false},
-		{"1b", 1, false},
-		{"102B", 102, false},
-		{"0.1k", 102, false},
-		{"0.1", 102, false},
-		{"1K", 1024, false},
-		{"1", 1024, false},
-		{"2.5", 1024 * 2.5, false},
-		{"1M", 1024 * 1024, false},
-		{"1.g", 1024 * 1024 * 1024, false},
-		{"10G", 10 * 1024 * 1024 * 1024, false},
-		{"off", -1, false},
-		{"OFF", -1, false},
-		{"", 0, true},
-		{"1p", 0, true},
-		{"1.p", 0, true},
-		{"1p", 0, true},
-		{"-1K", 0, true},
-	} {
-		ss := SizeSuffix(0)
-		err := ss.Set(test.in)
-		if test.err {
-			require.Error(t, err)
-		} else {
-			require.NoError(t, err)
-		}
-		assert.Equal(t, test.want, int64(ss))
-	}
-}
-
-func TestBwTimetableSet(t *testing.T) {
-	for _, test := range []struct {
-		in   string
-		want BwTimetable
-		err  bool
-	}{
-		{"", BwTimetable{}, true},
-		{"0", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 0}}, false},
-		{"666", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 666 * 1024}}, false},
-		{"10:20,666", BwTimetable{BwTimeSlot{hhmm: 1020, bandwidth: 666 * 1024}}, false},
-		{
-			"11:00,333 13:40,666 23:50,10M 23:59,off",
-			BwTimetable{
-				BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-				BwTimeSlot{hhmm: 1340, bandwidth: 666 * 1024},
-				BwTimeSlot{hhmm: 2350, bandwidth: 10 * 1024 * 1024},
-				BwTimeSlot{hhmm: 2359, bandwidth: -1},
-			},
-			false,
-		},
-		{"bad,bad", BwTimetable{}, true},
-		{"bad bad", BwTimetable{}, true},
-		{"bad", BwTimetable{}, true},
-		{"1000X", BwTimetable{}, true},
-		{"2401,666", BwTimetable{}, true},
-		{"1061,666", BwTimetable{}, true},
-	} {
-		tt := BwTimetable{}
-		err := tt.Set(test.in)
-		if test.err {
-			require.Error(t, err)
-		} else {
-			require.NoError(t, err)
-		}
-		assert.Equal(t, test.want, tt)
-	}
-}
-
-func TestBwTimetableLimitAt(t *testing.T) {
-	for _, test := range []struct {
-		tt   BwTimetable
-		now  time.Time
-		want BwTimeSlot
-	}{
-		{
-			BwTimetable{},
-			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 0, bandwidth: -1},
-		},
-		{
-			BwTimetable{BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024}},
-			time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-		},
-		{
-			BwTimetable{
-				BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-				BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
-				BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
-				BwTimeSlot{hhmm: 2350, bandwidth: -1},
-			},
-			time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 2350, bandwidth: -1},
-		},
-		{
-			BwTimetable{
-				BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-				BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
-				BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
-				BwTimeSlot{hhmm: 2350, bandwidth: -1},
-			},
-			time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-		},
-		{
-			BwTimetable{
-				BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-				BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
-				BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
-				BwTimeSlot{hhmm: 2350, bandwidth: -1},
-			},
-			time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
-		},
-		{
-			BwTimetable{
-				BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
-				BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
-				BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
-				BwTimeSlot{hhmm: 2350, bandwidth: -1},
-			},
-			time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
-			BwTimeSlot{hhmm: 2350, bandwidth: -1},
-		},
-	} {
-		slot := test.tt.LimitAt(test.now)
-		assert.Equal(t, test.want, slot)
-	}
-}
diff --git a/fs/fs.go b/fs/fs.go
index dc8a43751..df9ec7d38 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -14,6 +14,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ncw/rclone/fs/driveletter"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
@@ -29,7 +31,7 @@ const (
 // Globals
 var (
 	// Filesystem registry
-	fsRegistry []*RegInfo
+	Registry []*RegInfo
 	// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
 	ErrorNotFoundInConfigFile        = errors.New("didn't find section in config file")
 	ErrorCantPurge                   = errors.New("can't purge directory")
@@ -103,7 +105,7 @@ type OptionExample struct {
 //
 // Fs modules should use this in an init() function
 func Register(info *RegInfo) {
-	fsRegistry = append(fsRegistry, info)
+	Registry = append(Registry, info)
 }
 
 // Fs is the interface a cloud storage system must provide
@@ -158,7 +160,7 @@ type Info interface {
 	Precision() time.Duration
 
 	// Returns the supported hash types of the filesystem
-	Hashes() HashSet
+	Hashes() hash.Set
 
 	// Features returns the optional features of this Fs
 	Features() *Features
@@ -190,7 +192,7 @@ type ObjectInfo interface {
 
 	// Hash returns the selected checksum of the file
 	// If no checksum is available it returns ""
-	Hash(HashType) (string, error)
+	Hash(hash.Type) (string, error)
 
 	// Storable says whether this object can be stored
 	Storable() bool
@@ -671,7 +673,7 @@ type Objects []Object
 // ObjectPair is a pair of Objects used to describe a potential copy
 // operation.
 type ObjectPair struct {
-	src, dst Object
+	Src, Dst Object
 }
 
 // ObjectPairChan is a channel of ObjectPair
@@ -681,7 +683,7 @@ type ObjectPairChan chan ObjectPair
 //
 // Services are looked up in the config file
 func Find(name string) (*RegInfo, error) {
-	for _, item := range fsRegistry {
+	for _, item := range Registry {
 		if item.Name == name {
 			return item, nil
 		}
@@ -702,16 +704,16 @@ func MustFind(name string) *RegInfo {
 	return fs
 }
 
-// Pattern to match an rclone url
-var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
+// Matcher is a pattern to match an rclone URL
+var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
 
 // ParseRemote deconstructs a path into configName, fsPath, looking up
 // the fsName in the config file (returning NotFoundInConfigFile if not found)
 func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
-	parts := matcher.FindStringSubmatch(path)
+	parts := Matcher.FindStringSubmatch(path)
 	var fsName string
 	fsName, configName, fsPath = "local", "local", path
-	if parts != nil && !isDriveLetter(parts[1]) {
+	if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
 		configName, fsPath = parts[1], parts[2]
 		fsName = ConfigFileGet(configName, "type")
 		if fsName == "" {
@@ -741,10 +743,10 @@ func NewFs(path string) (Fs, error) {
 	return fsInfo.NewFs(configName, fsPath)
 }
 
-// temporaryLocalFs creates a local FS in the OS's temporary directory.
+// TemporaryLocalFs creates a local FS in the OS's temporary directory.
 //
 // No cleanup is performed; the caller must call Purge on the Fs themselves.
-func temporaryLocalFs() (Fs, error) {
+func TemporaryLocalFs() (Fs, error) {
 	path, err := ioutil.TempDir("", "rclone-spool")
 	if err == nil {
 		err = os.Remove(path)
@@ -777,3 +779,24 @@ func FileExists(fs Fs, remote string) (bool, error) {
 	}
 	return true, nil
 }
+
+// CalculateModifyWindow works out the modify window for the Fses passed
+// in and sets Config.ModifyWindow
+//
+// This is the largest modify window of all the fses in use, and the
+// user configured value
+func CalculateModifyWindow(fss ...Fs) {
+	for _, f := range fss {
+		if f != nil {
+			precision := f.Precision()
+			if precision > Config.ModifyWindow {
+				Config.ModifyWindow = precision
+			}
+			if precision == ModTimeNotSupported {
+				Infof(f, "Modify window not supported")
+				return
+			}
+		}
+	}
+	Infof(fss[0], "Modify window is %s", Config.ModifyWindow)
+}
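
To show the effect of the newly exported names (`Registry` via `Find`, and `Matcher`), a small sketch; the `backend/all` import and the remote string are illustrative assumptions.

    package main

    import (
    	"fmt"

    	_ "github.com/ncw/rclone/backend/all" // registers all the backends
    	"github.com/ncw/rclone/fs"
    )

    func main() {
    	// Find looks the backend up in the now-exported Registry.
    	info, err := fs.Find("local")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("backend:", info.Name)

    	// Matcher is the exported "remote:path" pattern.
    	if parts := fs.Matcher.FindStringSubmatch("myremote:some/path"); parts != nil {
    		fmt.Println("config name:", parts[1], "path:", parts[2])
    	}
    }
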
diff --git a/fs/closed_conn_unsupported.go b/fs/fserrors/closed_conn_unsupported.go
similarity index 92%
rename from fs/closed_conn_unsupported.go
rename to fs/fserrors/closed_conn_unsupported.go
index cacd20adf..379d181ad 100644
--- a/fs/closed_conn_unsupported.go
+++ b/fs/fserrors/closed_conn_unsupported.go
@@ -1,6 +1,6 @@
 // +build plan9
 
-package fs
+package fserrors
 
 // isClosedConnErrorPlatform reports whether err is an error from use
 // of a closed network connection using platform specific error codes.
diff --git a/fs/error.go b/fs/fserrors/error.go
similarity index 98%
rename from fs/error.go
rename to fs/fserrors/error.go
index d2a0e12f8..e9ebbcb0e 100644
--- a/fs/error.go
+++ b/fs/fserrors/error.go
@@ -1,6 +1,5 @@
-// Errors and error handling
-
-package fs
+// Package fserrors provides errors and error handling
+package fserrors
 
 import (
 	"fmt"
diff --git a/fs/error_test.go b/fs/fserrors/error_test.go
similarity index 99%
rename from fs/error_test.go
rename to fs/fserrors/error_test.go
index 5900c6d45..bf55cb3f3 100644
--- a/fs/error_test.go
+++ b/fs/fserrors/error_test.go
@@ -1,4 +1,4 @@
-package fs
+package fserrors
 
 import (
 	"fmt"
diff --git a/fs/retriable_errors.go b/fs/fserrors/retriable_errors.go
similarity index 94%
rename from fs/retriable_errors.go
rename to fs/fserrors/retriable_errors.go
index d8a3bd4de..9ec0b5b67 100644
--- a/fs/retriable_errors.go
+++ b/fs/fserrors/retriable_errors.go
@@ -1,6 +1,6 @@
 // +build !plan9
 
-package fs
+package fserrors
 
 import (
 	"syscall"
diff --git a/fs/retriable_errors_windows.go b/fs/fserrors/retriable_errors_windows.go
similarity index 97%
rename from fs/retriable_errors_windows.go
rename to fs/fserrors/retriable_errors_windows.go
index a4b86a6d0..55c8c5985 100644
--- a/fs/retriable_errors_windows.go
+++ b/fs/fserrors/retriable_errors_windows.go
@@ -1,6 +1,6 @@
 // +build windows
 
-package fs
+package fserrors
 
 import (
 	"syscall"
diff --git a/fs/http.go b/fs/fshttp/http.go
similarity index 73%
rename from fs/http.go
rename to fs/fshttp/http.go
index 6271b529d..6fad8a4d4 100644
--- a/fs/http.go
+++ b/fs/fshttp/http.go
@@ -1,6 +1,5 @@
-// The HTTP based parts of the config, Transport and Client
-
-package fs
+// Package fshttp contains the common http parts of the config, Transport and Client
+package fshttp
 
 import (
 	"bytes"
@@ -12,6 +11,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	"golang.org/x/net/context" // switch to "context" when we stop supporting go1.6
 	"golang.org/x/time/rate"
 )
@@ -27,15 +27,15 @@ var (
 	tpsBucket   *rate.Limiter // for limiting number of http transactions per second
 )
 
-// Start the token bucket if necessary
-func startHTTPTokenBucket() {
-	if Config.TPSLimit > 0 {
-		tpsBurst := Config.TPSLimitBurst
+// StartHTTPTokenBucket starts the token bucket if necessary
+func StartHTTPTokenBucket() {
+	if fs.Config.TPSLimit > 0 {
+		tpsBurst := fs.Config.TPSLimitBurst
 		if tpsBurst < 1 {
 			tpsBurst = 1
 		}
-		tpsBucket = rate.NewLimiter(rate.Limit(Config.TPSLimit), tpsBurst)
-		Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", Config.TPSLimit, tpsBurst)
+		tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
+		fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
 	}
 }
 
@@ -108,8 +108,8 @@ func setDefaults(a, b interface{}) {
 	}
 }
 
-// Transport returns an http.RoundTripper with the correct timeouts
-func (ci *ConfigInfo) Transport() http.RoundTripper {
+// NewTransport returns an http.RoundTripper with the correct timeouts
+func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
 	noTransport.Do(func() {
 		// Start with a sensible set of defaults then override.
 		// This also means we get new stuff when it gets added to go
@@ -120,24 +120,24 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
 		t.TLSHandshakeTimeout = ci.ConnectTimeout
 		t.ResponseHeaderTimeout = ci.Timeout
 		t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
-		t.DisableCompression = *noGzip
+		t.DisableCompression = ci.NoGzip
 		// Set in http_old.go initTransport
 		//   t.Dial
 		// Set in http_new.go initTransport
 		//   t.DialContext
 		//   t.IdleConnTimeout
 		//   t.ExpectContinueTimeout
-		ci.initTransport(t)
+		initTransport(ci, t)
 		// Wrap that http.Transport in our own transport
-		transport = NewTransport(t, ci.Dump)
+		transport = newTransport(ci, t)
 	})
 	return transport
 }
 
-// Client returns an http.Client with the correct timeouts
-func (ci *ConfigInfo) Client() *http.Client {
+// NewClient returns an http.Client with the correct timeouts
+func NewClient(ci *fs.ConfigInfo) *http.Client {
 	return &http.Client{
-		Transport: ci.Transport(),
+		Transport: NewTransport(ci),
 	}
 }
 
@@ -146,16 +146,18 @@ func (ci *ConfigInfo) Client() *http.Client {
 // * Does logging
 type Transport struct {
 	*http.Transport
-	dump          DumpFlags
+	dump          fs.DumpFlags
 	filterRequest func(req *http.Request)
+	userAgent     string
 }
 
-// NewTransport wraps the http.Transport passed in and logs all
+// newTransport wraps the http.Transport passed in and logs all
 // roundtrips including the body if the dump flags request it.
-func NewTransport(transport *http.Transport, dump DumpFlags) *Transport {
+func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
 	return &Transport{
 		Transport: transport,
-		dump:      dump,
+		dump:      ci.Dump,
+		userAgent: ci.UserAgent,
 	}
 }
 
@@ -188,13 +190,13 @@ func checkServerTime(req *http.Request, resp *http.Response) {
 	}
 	date, err := http.ParseTime(dateString)
 	if err != nil {
-		Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
+		fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
 		return
 	}
 	dt := time.Since(date)
 	const window = 5 * 60 * time.Second
 	if dt > window || dt < -window {
-		Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
+		fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
 	}
 	checkedHostMu.Lock()
 	checkedHost[host] = struct{}{}
@@ -250,39 +252,39 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
 	if tpsBucket != nil {
 		tbErr := tpsBucket.Wait(context.Background()) // FIXME switch to req.Context() when we drop go1.6 support
 		if tbErr != nil {
-			Errorf(nil, "HTTP token bucket error: %v", err)
+			fs.Errorf(nil, "HTTP token bucket error: %v", err)
 		}
 	}
 	// Force user agent
-	req.Header.Set("User-Agent", *userAgent)
+	req.Header.Set("User-Agent", t.userAgent)
 	// Filter the request if required
 	if t.filterRequest != nil {
 		t.filterRequest(req)
 	}
 	// Logf request
-	if t.dump&(DumpHeaders|DumpBodies|DumpAuth|DumpRequests|DumpResponses) != 0 {
-		buf, _ := httputil.DumpRequestOut(req, t.dump&(DumpBodies|DumpRequests) != 0)
-		if t.dump&DumpAuth == 0 {
+	if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+		buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
+		if t.dump&fs.DumpAuth == 0 {
 			buf = cleanAuths(buf)
 		}
-		Debugf(nil, "%s", separatorReq)
-		Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
-		Debugf(nil, "%s", string(buf))
-		Debugf(nil, "%s", separatorReq)
+		fs.Debugf(nil, "%s", separatorReq)
+		fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
+		fs.Debugf(nil, "%s", string(buf))
+		fs.Debugf(nil, "%s", separatorReq)
 	}
 	// Do round trip
 	resp, err = t.Transport.RoundTrip(req)
 	// Logf response
-	if t.dump&(DumpHeaders|DumpBodies|DumpAuth|DumpRequests|DumpResponses) != 0 {
-		Debugf(nil, "%s", separatorResp)
-		Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
+	if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+		fs.Debugf(nil, "%s", separatorResp)
+		fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
 		if err != nil {
-			Debugf(nil, "Error: %v", err)
+			fs.Debugf(nil, "Error: %v", err)
 		} else {
-			buf, _ := httputil.DumpResponse(resp, t.dump&(DumpBodies|DumpResponses) != 0)
-			Debugf(nil, "%s", string(buf))
+			buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
+			fs.Debugf(nil, "%s", string(buf))
 		}
-		Debugf(nil, "%s", separatorResp)
+		fs.Debugf(nil, "%s", separatorResp)
 	}
 	if err == nil {
 		checkServerTime(req, resp)
@@ -292,7 +294,7 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
 
 // NewDialer creates a net.Dialer structure with Timeout, Keepalive
 // and LocalAddr set from rclone flags.
-func (ci *ConfigInfo) NewDialer() *net.Dialer {
+func NewDialer(ci *fs.ConfigInfo) *net.Dialer {
 	dialer := &net.Dialer{
 		Timeout:   ci.ConnectTimeout,
 		KeepAlive: 30 * time.Second,
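
A sketch of the new constructor style: where code previously called methods on `*fs.ConfigInfo` (for example `ci.Client()`), it now passes the config into the fshttp package. This assumes `fs.Config` is the global `*fs.ConfigInfo`, as the call sites in this patch suggest.

    package main

    import (
    	"fmt"

    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/fshttp"
    )

    func main() {
    	fshttp.StartHTTPTokenBucket() // honour --tpslimit if set
    	// Assumption: fs.Config is the global *fs.ConfigInfo.
    	client := fshttp.NewClient(fs.Config)
    	resp, err := client.Get("https://example.com/")
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer fs.CheckClose(resp.Body, &err)
    	fmt.Println("status:", resp.Status)
    }
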
diff --git a/fs/http_new.go b/fs/fshttp/http_new.go
similarity index 50%
rename from fs/http_new.go
rename to fs/fshttp/http_new.go
index 68d8deecc..185f22e99 100644
--- a/fs/http_new.go
+++ b/fs/fshttp/http_new.go
@@ -2,18 +2,20 @@
 
 //+build go1.7
 
-package fs
+package fshttp
 
 import (
 	"context"
 	"net"
 	"net/http"
 	"time"
+
+	"github.com/ncw/rclone/fs"
 )
 
 // dial with context and timeouts
-func (ci *ConfigInfo) dialContextTimeout(ctx context.Context, network, address string) (net.Conn, error) {
-	dialer := ci.NewDialer()
+func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
+	dialer := NewDialer(ci)
 	c, err := dialer.DialContext(ctx, network, address)
 	if err != nil {
 		return c, err
@@ -22,8 +24,10 @@ func (ci *ConfigInfo) dialContextTimeout(ctx context.Context, network, address s
 }
 
 // Initialise the http.Transport for go1.7+
-func (ci *ConfigInfo) initTransport(t *http.Transport) {
-	t.DialContext = ci.dialContextTimeout
+func initTransport(ci *fs.ConfigInfo, t *http.Transport) {
+	t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+		return dialContextTimeout(ctx, network, addr, ci)
+	}
 	t.IdleConnTimeout = 60 * time.Second
 	t.ExpectContinueTimeout = ci.ConnectTimeout
 }
diff --git a/fs/fshttp/http_old.go b/fs/fshttp/http_old.go
new file mode 100644
index 000000000..d4c2bc68e
--- /dev/null
+++ b/fs/fshttp/http_old.go
@@ -0,0 +1,29 @@
+// HTTP parts pre go1.7
+
+//+build !go1.7
+
+package fshttp
+
+import (
+	"net"
+	"net/http"
+
+	"github.com/ncw/rclone/fs"
+)
+
+// dial with timeouts
+func dialTimeout(network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
+	dialer := NewDialer(ci)
+	c, err := dialer.Dial(network, address)
+	if err != nil {
+		return c, err
+	}
+	return newTimeoutConn(c, ci.Timeout), nil
+}
+
+// Initialise the http.Transport for pre go1.7
+func initTransport(ci *fs.ConfigInfo, t *http.Transport) {
+	t.Dial = func(network, addr string) (net.Conn, error) {
+		return dialTimeout(network, addr, ci)
+	}
+}
diff --git a/fs/http_test.go b/fs/fshttp/http_test.go
similarity index 99%
rename from fs/http_test.go
rename to fs/fshttp/http_test.go
index eebd39d45..def673737 100644
--- a/fs/http_test.go
+++ b/fs/fshttp/http_test.go
@@ -1,6 +1,6 @@
 //+build go1.7
 
-package fs
+package fshttp
 
 import (
 	"fmt"
diff --git a/fs/path.go b/fs/fspath/path.go
similarity index 91%
rename from fs/path.go
rename to fs/fspath/path.go
index 1167e28c2..0a1a3eb1f 100644
--- a/fs/path.go
+++ b/fs/fspath/path.go
@@ -1,4 +1,5 @@
-package fs
+// Package fspath contains routines for fspath manipulation
+package fspath
 
 import (
 	"path"
diff --git a/fs/path_test.go b/fs/fspath/path_test.go
similarity index 98%
rename from fs/path_test.go
rename to fs/fspath/path_test.go
index cf455d031..6c3a1b294 100644
--- a/fs/path_test.go
+++ b/fs/fspath/path_test.go
@@ -1,4 +1,4 @@
-package fs
+package fspath
 
 import (
 	"fmt"
diff --git a/fs/hash.go b/fs/hash/hash.go
similarity index 75%
rename from fs/hash.go
rename to fs/hash/hash.go
index f62afd184..d415b18d8 100644
--- a/fs/hash.go
+++ b/fs/hash/hash.go
@@ -1,4 +1,4 @@
-package fs
+package hash
 
 import (
 	"crypto/md5"
@@ -14,8 +14,8 @@ import (
 	"github.com/spf13/pflag"
 )
 
-// HashType indicates a standard hashing algorithm
-type HashType int
+// Type indicates a standard hashing algorithm
+type Type int
 
 // ErrHashUnsupported should be returned by a filesystem,
 // if it is requested to deliver an unsupported hash type.
@@ -23,7 +23,7 @@ var ErrHashUnsupported = errors.New("hash type not supported")
 
 const (
 	// HashMD5 indicates MD5 support
-	HashMD5 HashType = 1 << iota
+	HashMD5 Type = 1 << iota
 
 	// HashSHA1 indicates SHA-1 support
 	HashSHA1
@@ -33,7 +33,7 @@ const (
 	HashDropbox
 
 	// HashNone indicates no hashes are supported
-	HashNone HashType = 0
+	HashNone Type = 0
 )
 
 // SupportedHashes returns a set of all the supported hashes by
@@ -41,19 +41,19 @@ const (
 var SupportedHashes = NewHashSet(HashMD5, HashSHA1, HashDropbox)
 
 // HashWidth returns the width in characters for any HashType
-var HashWidth = map[HashType]int{
+var HashWidth = map[Type]int{
 	HashMD5:     32,
 	HashSHA1:    40,
 	HashDropbox: 64,
 }
 
-// HashStream will calculate hashes of all supported hash types.
-func HashStream(r io.Reader) (map[HashType]string, error) {
-	return HashStreamTypes(r, SupportedHashes)
+// Stream will calculate hashes of all supported hash types.
+func Stream(r io.Reader) (map[Type]string, error) {
+	return StreamTypes(r, SupportedHashes)
 }
 
-// HashStreamTypes will calculate hashes of the requested hash types.
-func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
+// StreamTypes will calculate hashes of the requested hash types.
+func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
 	hashers, err := hashFromTypes(set)
 	if err != nil {
 		return nil, err
@@ -63,7 +63,7 @@ func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	var ret = make(map[HashType]string)
+	var ret = make(map[Type]string)
 	for k, v := range hashers {
 		ret[k] = hex.EncodeToString(v.Sum(nil))
 	}
@@ -72,7 +72,7 @@ func HashStreamTypes(r io.Reader, set HashSet) (map[HashType]string, error) {
 
 // String returns a string representation of the hash type.
 // The function will panic if the hash type is unknown.
-func (h HashType) String() string {
+func (h Type) String() string {
 	switch h {
 	case HashNone:
 		return "None"
@@ -89,7 +89,7 @@ func (h HashType) String() string {
 }
 
 // Set a HashType from a flag
-func (h *HashType) Set(s string) error {
+func (h *Type) Set(s string) error {
 	switch s {
 	case "None":
 		*h = HashNone
@@ -106,21 +106,21 @@ func (h *HashType) Set(s string) error {
 }
 
 // Type of the value
-func (h HashType) Type() string {
+func (h Type) Type() string {
 	return "string"
 }
 
 // Check it satisfies the interface
-var _ pflag.Value = (*HashType)(nil)
+var _ pflag.Value = (*Type)(nil)
 
 // hashFromTypes will return hashers for all the requested types.
 // The types must be a subset of SupportedHashes,
 // and this function must support all types.
-func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
+func hashFromTypes(set Set) (map[Type]hash.Hash, error) {
 	if !set.SubsetOf(SupportedHashes) {
 		return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
 	}
-	var hashers = make(map[HashType]hash.Hash)
+	var hashers = make(map[Type]hash.Hash)
 	types := set.Array()
 	for _, t := range types {
 		switch t {
@@ -141,7 +141,7 @@ func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
 // hashToMultiWriter will combine a set of hashers into a
 // single multiwriter, where one write will update all
 // the hashers.
-func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
+func hashToMultiWriter(h map[Type]hash.Hash) io.Writer {
 	// Convert to a slice
 	var w = make([]io.Writer, 0, len(h))
 	for _, v := range h {
@@ -155,7 +155,7 @@ func hashToMultiWriter(h map[HashType]hash.Hash) io.Writer {
 type MultiHasher struct {
 	w    io.Writer
 	size int64
-	h    map[HashType]hash.Hash // Hashes
+	h    map[Type]hash.Hash // Hashes
 }
 
 // NewMultiHasher will return a hash writer that will write all
@@ -170,7 +170,7 @@ func NewMultiHasher() *MultiHasher {
 
 // NewMultiHasherTypes will return a hash writer that will write
 // the requested hash types.
-func NewMultiHasherTypes(set HashSet) (*MultiHasher, error) {
+func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
 	hashers, err := hashFromTypes(set)
 	if err != nil {
 		return nil, err
@@ -187,8 +187,8 @@ func (m *MultiHasher) Write(p []byte) (n int, err error) {
 
 // Sums returns the sums of all accumulated hashes as hex encoded
 // strings.
-func (m *MultiHasher) Sums() map[HashType]string {
-	dst := make(map[HashType]string)
+func (m *MultiHasher) Sums() map[Type]string {
+	dst := make(map[Type]string)
 	for k, v := range m.h {
 		dst[k] = hex.EncodeToString(v.Sum(nil))
 	}
@@ -200,63 +200,63 @@ func (m *MultiHasher) Size() int64 {
 	return m.size
 }
 
-// A HashSet Indicates one or more hash types.
-type HashSet int
+// A Set indicates one or more hash types.
+type Set int
 
 // NewHashSet will create a new hash set with the hash types supplied
-func NewHashSet(t ...HashType) HashSet {
-	h := HashSet(HashNone)
+func NewHashSet(t ...Type) Set {
+	h := Set(HashNone)
 	return h.Add(t...)
 }
 
 // Add one or more hash types to the set.
 // Returns the modified hash set.
-func (h *HashSet) Add(t ...HashType) HashSet {
+func (h *Set) Add(t ...Type) Set {
 	for _, v := range t {
-		*h |= HashSet(v)
+		*h |= Set(v)
 	}
 	return *h
 }
 
 // Contains returns true if the set contains the given hash type.
-func (h HashSet) Contains(t HashType) bool {
+func (h Set) Contains(t Type) bool {
 	return int(h)&int(t) != 0
 }
 
 // Overlap returns the overlapping hash types
-func (h HashSet) Overlap(t HashSet) HashSet {
-	return HashSet(int(h) & int(t))
+func (h Set) Overlap(t Set) Set {
+	return Set(int(h) & int(t))
 }
 
 // SubsetOf will return true if all types of h
 // are present in the set c
-func (h HashSet) SubsetOf(c HashSet) bool {
+func (h Set) SubsetOf(c Set) bool {
 	return int(h)|int(c) == int(c)
 }
 
 // GetOne will return a hash type.
 // Currently the first is returned, but it could be
 // improved to return the strongest.
-func (h HashSet) GetOne() HashType {
+func (h Set) GetOne() Type {
 	v := int(h)
 	i := uint(0)
 	for v != 0 {
 		if v&1 != 0 {
-			return HashType(1 << i)
+			return Type(1 << i)
 		}
 		i++
 		v >>= 1
 	}
-	return HashType(HashNone)
+	return Type(HashNone)
 }
 
 // Array returns an array of all hash types in the set
-func (h HashSet) Array() (ht []HashType) {
+func (h Set) Array() (ht []Type) {
 	v := int(h)
 	i := uint(0)
 	for v != 0 {
 		if v&1 != 0 {
-			ht = append(ht, HashType(1<<i))
+			ht = append(ht, Type(1<<i))
 		}
 		i++
 		v >>= 1
@@ -265,7 +265,7 @@ func (h HashSet) Array() (ht []HashType) {
 }
 
 // Count returns the number of hash types in the set
-func (h HashSet) Count() int {
+func (h Set) Count() int {
 	if int(h) == 0 {
 		return 0
 	}
@@ -281,7 +281,7 @@ func (h HashSet) Count() int {
 
 // String returns a string representation of the hash set.
 // The function will panic if it contains an unknown type.
-func (h HashSet) String() string {
+func (h Set) String() string {
 	a := h.Array()
 	var r []string
 	for _, v := range a {
@@ -289,3 +289,12 @@ func (h HashSet) String() string {
 	}
 	return "[" + strings.Join(r, ", ") + "]"
 }
+
+// Equals checks to see if src == dst, but ignores empty strings
+// and returns true if either is empty.
+func Equals(src, dst string) bool {
+	if src == "" || dst == "" {
+		return true
+	}
+	return src == dst
+}
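
A sketch of the renamed hash API in use: computing a selection of hashes over a stream and some basic `Set` arithmetic, using only the names introduced above.

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/ncw/rclone/fs/hash"
    )

    func main() {
    	set := hash.NewHashSet(hash.HashMD5, hash.HashSHA1)
    	sums, err := hash.StreamTypes(strings.NewReader("hello"), set)
    	if err != nil {
    		panic(err)
    	}
    	for t, sum := range sums {
    		fmt.Printf("%v: %s\n", t, sum)
    	}
    	fmt.Println(set.Contains(hash.HashDropbox)) // false
    }
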
diff --git a/fs/hash_test.go b/fs/hash/hash_test.go
similarity index 56%
rename from fs/hash_test.go
rename to fs/hash/hash_test.go
index b3cc8d07e..ccf06200a 100644
--- a/fs/hash_test.go
+++ b/fs/hash/hash_test.go
@@ -1,89 +1,89 @@
-package fs_test
+package hash_test
 
 import (
 	"bytes"
 	"io"
 	"testing"
 
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestHashSet(t *testing.T) {
-	var h fs.HashSet
+	var h hash.Set
 
 	assert.Equal(t, 0, h.Count())
 
 	a := h.Array()
 	assert.Len(t, a, 0)
 
-	h = h.Add(fs.HashMD5)
+	h = h.Add(hash.HashMD5)
 	assert.Equal(t, 1, h.Count())
-	assert.Equal(t, fs.HashMD5, h.GetOne())
+	assert.Equal(t, hash.HashMD5, h.GetOne())
 	a = h.Array()
 	assert.Len(t, a, 1)
-	assert.Equal(t, a[0], fs.HashMD5)
+	assert.Equal(t, a[0], hash.HashMD5)
 
 	// Test overlap, with all hashes
-	h = h.Overlap(fs.SupportedHashes)
+	h = h.Overlap(hash.SupportedHashes)
 	assert.Equal(t, 1, h.Count())
-	assert.Equal(t, fs.HashMD5, h.GetOne())
-	assert.True(t, h.SubsetOf(fs.SupportedHashes))
-	assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
+	assert.Equal(t, hash.HashMD5, h.GetOne())
+	assert.True(t, h.SubsetOf(hash.SupportedHashes))
+	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
 
-	h = h.Add(fs.HashSHA1)
+	h = h.Add(hash.HashSHA1)
 	assert.Equal(t, 2, h.Count())
 	one := h.GetOne()
-	if !(one == fs.HashMD5 || one == fs.HashSHA1) {
+	if !(one == hash.HashMD5 || one == hash.HashSHA1) {
 		t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
 	}
-	assert.True(t, h.SubsetOf(fs.SupportedHashes))
-	assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5)))
-	assert.False(t, h.SubsetOf(fs.NewHashSet(fs.HashSHA1)))
-	assert.True(t, h.SubsetOf(fs.NewHashSet(fs.HashMD5, fs.HashSHA1)))
+	assert.True(t, h.SubsetOf(hash.SupportedHashes))
+	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
+	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashSHA1)))
+	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5, hash.HashSHA1)))
 	a = h.Array()
 	assert.Len(t, a, 2)
 
-	ol := h.Overlap(fs.NewHashSet(fs.HashMD5))
+	ol := h.Overlap(hash.NewHashSet(hash.HashMD5))
 	assert.Equal(t, 1, ol.Count())
-	assert.True(t, ol.Contains(fs.HashMD5))
-	assert.False(t, ol.Contains(fs.HashSHA1))
+	assert.True(t, ol.Contains(hash.HashMD5))
+	assert.False(t, ol.Contains(hash.HashSHA1))
 
-	ol = h.Overlap(fs.NewHashSet(fs.HashMD5, fs.HashSHA1))
+	ol = h.Overlap(hash.NewHashSet(hash.HashMD5, hash.HashSHA1))
 	assert.Equal(t, 2, ol.Count())
-	assert.True(t, ol.Contains(fs.HashMD5))
-	assert.True(t, ol.Contains(fs.HashSHA1))
+	assert.True(t, ol.Contains(hash.HashMD5))
+	assert.True(t, ol.Contains(hash.HashSHA1))
 }
 
 type hashTest struct {
 	input  []byte
-	output map[fs.HashType]string
+	output map[hash.Type]string
 }
 
 var hashTestSet = []hashTest{
 	{
 		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
-		output: map[fs.HashType]string{
-			fs.HashMD5:     "bf13fc19e5151ac57d4252e0e0f87abe",
-			fs.HashSHA1:    "3ab6543c08a75f292a5ecedac87ec41642d12166",
-			fs.HashDropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
+		output: map[hash.Type]string{
+			hash.HashMD5:     "bf13fc19e5151ac57d4252e0e0f87abe",
+			hash.HashSHA1:    "3ab6543c08a75f292a5ecedac87ec41642d12166",
+			hash.HashDropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
 		},
 	},
 	// Empty data set
 	{
 		input: []byte{},
-		output: map[fs.HashType]string{
-			fs.HashMD5:     "d41d8cd98f00b204e9800998ecf8427e",
-			fs.HashSHA1:    "da39a3ee5e6b4b0d3255bfef95601890afd80709",
-			fs.HashDropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+		output: map[hash.Type]string{
+			hash.HashMD5:     "d41d8cd98f00b204e9800998ecf8427e",
+			hash.HashSHA1:    "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+			hash.HashDropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
 		},
 	},
 }
 
 func TestMultiHasher(t *testing.T) {
 	for _, test := range hashTestSet {
-		mh := fs.NewMultiHasher()
+		mh := hash.NewMultiHasher()
 		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
 		require.NoError(t, err)
 		assert.Len(t, test.input, int(n))
@@ -103,9 +103,9 @@ func TestMultiHasher(t *testing.T) {
 }
 
 func TestMultiHasherTypes(t *testing.T) {
-	h := fs.HashSHA1
+	h := hash.HashSHA1
 	for _, test := range hashTestSet {
-		mh, err := fs.NewMultiHasherTypes(fs.NewHashSet(h))
+		mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(h))
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -120,7 +120,7 @@ func TestMultiHasherTypes(t *testing.T) {
 
 func TestHashStream(t *testing.T) {
 	for _, test := range hashTestSet {
-		sums, err := fs.HashStream(bytes.NewBuffer(test.input))
+		sums, err := hash.Stream(bytes.NewBuffer(test.input))
 		require.NoError(t, err)
 		for k, v := range sums {
 			expect, ok := test.output[k]
@@ -137,9 +137,9 @@ func TestHashStream(t *testing.T) {
 }
 
 func TestHashStreamTypes(t *testing.T) {
-	h := fs.HashSHA1
+	h := hash.HashSHA1
 	for _, test := range hashTestSet {
-		sums, err := fs.HashStreamTypes(bytes.NewBuffer(test.input), fs.NewHashSet(h))
+		sums, err := hash.StreamTypes(bytes.NewBuffer(test.input), hash.NewHashSet(h))
 		require.NoError(t, err)
 		assert.Len(t, sums, 1)
 		assert.Equal(t, sums[h], test.output[h])
@@ -147,17 +147,17 @@ func TestHashStreamTypes(t *testing.T) {
 }
 
 func TestHashSetStringer(t *testing.T) {
-	h := fs.NewHashSet(fs.HashSHA1, fs.HashMD5, fs.HashDropbox)
+	h := hash.NewHashSet(hash.HashSHA1, hash.HashMD5, hash.HashDropbox)
 	assert.Equal(t, h.String(), "[MD5, SHA-1, DropboxHash]")
-	h = fs.NewHashSet(fs.HashSHA1)
+	h = hash.NewHashSet(hash.HashSHA1)
 	assert.Equal(t, h.String(), "[SHA-1]")
-	h = fs.NewHashSet()
+	h = hash.NewHashSet()
 	assert.Equal(t, h.String(), "[]")
 }
 
 func TestHashStringer(t *testing.T) {
-	h := fs.HashMD5
+	h := hash.HashMD5
 	assert.Equal(t, h.String(), "MD5")
-	h = fs.HashNone
+	h = hash.HashNone
 	assert.Equal(t, h.String(), "None")
 }
diff --git a/fs/http_old.go b/fs/http_old.go
deleted file mode 100644
index fbe4a5b85..000000000
--- a/fs/http_old.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// HTTP parts pre go1.7
-
-//+build !go1.7
-
-package fs
-
-import (
-	"net"
-	"net/http"
-)
-
-// dial with timeouts
-func (ci *ConfigInfo) dialTimeout(network, address string) (net.Conn, error) {
-	dialer := ci.NewDialer()
-	c, err := dialer.Dial(network, address)
-	if err != nil {
-		return c, err
-	}
-	return newTimeoutConn(c, ci.Timeout), nil
-}
-
-// Initialise the http.Transport for pre go1.7
-func (ci *ConfigInfo) initTransport(t *http.Transport) {
-	t.Dial = ci.dialTimeout
-}
diff --git a/fs/list/list.go b/fs/list/list.go
new file mode 100644
index 000000000..4e213adbd
--- /dev/null
+++ b/fs/list/list.go
@@ -0,0 +1,102 @@
+// Package list contains list functions
+package list
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/pkg/errors"
+)
+
+// DirSorted reads Object and *Dir into entries for the given Fs.
+//
+// dir is the start directory, "" for root
+//
+// If includeAll is specified all files will be added, otherwise only
+// files and directories passing the filter will be added.
+//
+// Files will be returned in sorted order
+func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
+	// Get unfiltered entries from the fs
+	entries, err = f.List(dir)
+	if err != nil {
+		return nil, err
+	}
+// This should happen only if the exclude file lives in the
+	// starting directory, otherwise ListDirSorted should not be
+	// called.
+	if !includeAll && filter.Active.ListContainsExcludeFile(entries) {
+		fs.Debugf(dir, "Excluded from sync (and deletion)")
+		return nil, nil
+	}
+	return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f))
+}
+
+// filter (if required) and check the entries, then sort them
+func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string,
+	IncludeObject func(o fs.Object) bool,
+	IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
+	newEntries = entries[:0] // in place filter
+	prefix := ""
+	if dir != "" {
+		prefix = dir + "/"
+	}
+	for _, entry := range entries {
+		ok := true
+		// check includes and types
+		switch x := entry.(type) {
+		case fs.Object:
+			// Make sure we don't delete excluded files if not required
+			if !includeAll && !IncludeObject(x) {
+				ok = false
+				fs.Debugf(x, "Excluded from sync (and deletion)")
+			}
+		case fs.Directory:
+			if !includeAll {
+				include, err := IncludeDirectory(x.Remote())
+				if err != nil {
+					return nil, err
+				}
+				if !include {
+					ok = false
+					fs.Debugf(x, "Excluded from sync (and deletion)")
+				}
+			}
+		default:
+			return nil, errors.Errorf("unknown object type %T", entry)
+		}
+		// check remote name belongs in this directory
+		remote := entry.Remote()
+		switch {
+		case !ok:
+			// ignore
+		case !strings.HasPrefix(remote, prefix):
+			ok = false
+			fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
+		case remote == prefix:
+			ok = false
+			fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
+		case strings.ContainsRune(remote[len(prefix):], '/'):
+			ok = false
+			fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
+		default:
+			// ok
+		}
+		if ok {
+			newEntries = append(newEntries, entry)
+		}
+	}
+	entries = newEntries
+
+	// Sort the directory entries by Remote
+	//
+	// We use a stable sort here just in case there are
+	// duplicates. Assuming the remote delivers the entries in a
+	// consistent order, this will give the best user experience
+	// in syncing as it will use the first entry for the sync
+	// comparison.
+	sort.Stable(entries)
+	return entries, nil
+}
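
A sketch of calling the relocated helper; the remote name is a placeholder, and filtering comes from the global `filter.Active` as wired in the code above.

    package main

    import (
    	"fmt"

    	_ "github.com/ncw/rclone/backend/all" // registers the backends
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/list"
    )

    func main() {
    	f, err := fs.NewFs("myremote:path") // placeholder remote
    	if err != nil {
    		panic(err)
    	}
    	entries, err := list.DirSorted(f, false, "") // "" lists the root
    	if err != nil {
    		panic(err)
    	}
    	for _, entry := range entries {
    		fmt.Println(entry.Remote())
    	}
    }
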
diff --git a/fs/list/list_test.go b/fs/list/list_test.go
new file mode 100644
index 000000000..f6d8bdf18
--- /dev/null
+++ b/fs/list/list_test.go
@@ -0,0 +1,104 @@
+package list
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fstest/mockdir"
+	"github.com/ncw/rclone/fstest/mockobject"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// NB integration tests for DirSorted are in
+// fs/operations/listdirsorted_test.go
+
+func TestFilterAndSortIncludeAll(t *testing.T) {
+	da := mockdir.New("a")
+	oA := mockobject.Object("A")
+	db := mockdir.New("b")
+	oB := mockobject.Object("B")
+	dc := mockdir.New("c")
+	oC := mockobject.Object("C")
+	dd := mockdir.New("d")
+	oD := mockobject.Object("D")
+	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
+	includeObject := func(o fs.Object) bool {
+		return o != oB
+	}
+	includeDirectory := func(remote string) (bool, error) {
+		return remote != "c", nil
+	}
+	// no filter
+	newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory)
+	require.NoError(t, err)
+	assert.Equal(t,
+		newEntries,
+		fs.DirEntries{oA, oB, oC, oD, da, db, dc, dd},
+	)
+	// filter
+	newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory)
+	require.NoError(t, err)
+	assert.Equal(t,
+		newEntries,
+		fs.DirEntries{oA, oC, oD, da, db, dd},
+	)
+}
+
+func TestFilterAndSortCheckDir(t *testing.T) {
+	// Check the different kinds of error when listing "dir"
+	da := mockdir.New("dir/")
+	oA := mockobject.Object("diR/a")
+	db := mockdir.New("dir/b")
+	oB := mockobject.Object("dir/B/sub")
+	dc := mockdir.New("dir/c")
+	oC := mockobject.Object("dir/C")
+	dd := mockdir.New("dir/d")
+	oD := mockobject.Object("dir/D")
+	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
+	newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t,
+		newEntries,
+		fs.DirEntries{oC, oD, db, dc, dd},
+	)
+}
+
+func TestFilterAndSortCheckDirRoot(t *testing.T) {
+	// Check the different kinds of error when listing the root ""
+	da := mockdir.New("")
+	oA := mockobject.Object("A")
+	db := mockdir.New("b")
+	oB := mockobject.Object("B/sub")
+	dc := mockdir.New("c")
+	oC := mockobject.Object("C")
+	dd := mockdir.New("d")
+	oD := mockobject.Object("D")
+	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
+	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
+	require.NoError(t, err)
+	assert.Equal(t,
+		newEntries,
+		fs.DirEntries{oA, oC, oD, db, dc, dd},
+	)
+}
+
+type unknownDirEntry string
+
+func (o unknownDirEntry) String() string         { return string(o) }
+func (o unknownDirEntry) Remote() string         { return string(o) }
+func (o unknownDirEntry) ModTime() (t time.Time) { return t }
+func (o unknownDirEntry) Size() int64            { return 0 }
+
+func TestFilterAndSortUnknown(t *testing.T) {
+	// Check that an unknown entry produces an error
+	da := mockdir.New("")
+	oA := mockobject.Object("A")
+	ub := unknownDirEntry("b")
+	oB := mockobject.Object("B/sub")
+	entries := fs.DirEntries{da, oA, ub, oB}
+	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
+	assert.Error(t, err, "error")
+	assert.Nil(t, newEntries)
+}
diff --git a/fs/log.go b/fs/log.go
index 3c6573ecf..94f61b9a0 100644
--- a/fs/log.go
+++ b/fs/log.go
@@ -1,17 +1,10 @@
-// Logging for rclone
-
 package fs
 
 import (
 	"fmt"
 	"log"
-	"os"
-	"reflect"
-	"runtime"
-	"strings"
 
 	"github.com/pkg/errors"
-	"github.com/spf13/pflag"
 )
 
 // LogLevel describes rclone's logs.  These are a subset of the syslog log levels.
@@ -74,35 +67,25 @@ func (l *LogLevel) Type() string {
 	return "string"
 }
 
-// Check it satisfies the interface
-var _ pflag.Value = (*LogLevel)(nil)
-
-// Flags
-var (
-	logFile        = StringP("log-file", "", "", "Log everything to this file")
-	useSyslog      = BoolP("syslog", "", false, "Use Syslog for logging")
-	syslogFacility = StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
-)
-
-// logPrint sends the text to the logger of level
-var logPrint = func(level LogLevel, text string) {
+// LogPrint sends the text to the logger of level
+var LogPrint = func(level LogLevel, text string) {
 	text = fmt.Sprintf("%-6s: %s", level, text)
 	log.Print(text)
 }
 
-// logPrintf produces a log string from the arguments passed in
-func logPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
+// LogPrintf produces a log string from the arguments passed in
+func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
 	out := fmt.Sprintf(text, args...)
 	if o != nil {
 		out = fmt.Sprintf("%v: %s", o, out)
 	}
-	logPrint(level, out)
+	LogPrint(level, out)
 }
 
 // LogLevelPrintf writes logs at the given level
 func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
 	if Config.LogLevel >= level {
-		logPrintf(level, o, text, args...)
+		LogPrintf(level, o, text, args...)
 	}
 }
 
@@ -110,7 +93,7 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interfac
 // should always be seen by the user.
 func Errorf(o interface{}, text string, args ...interface{}) {
 	if Config.LogLevel >= LogLevelError {
-		logPrintf(LogLevelError, o, text, args...)
+		LogPrintf(LogLevelError, o, text, args...)
 	}
 }
 
@@ -121,7 +104,7 @@ func Errorf(o interface{}, text string, args ...interface{}) {
 // out with the -q flag.
 func Logf(o interface{}, text string, args ...interface{}) {
 	if Config.LogLevel >= LogLevelNotice {
-		logPrintf(LogLevelNotice, o, text, args...)
+		LogPrintf(LogLevelNotice, o, text, args...)
 	}
 }
 
@@ -130,7 +113,7 @@ func Logf(o interface{}, text string, args ...interface{}) {
 // appear with the -v flag.
 func Infof(o interface{}, text string, args ...interface{}) {
 	if Config.LogLevel >= LogLevelInfo {
-		logPrintf(LogLevelInfo, o, text, args...)
+		LogPrintf(LogLevelInfo, o, text, args...)
 	}
 }
 
@@ -138,75 +121,15 @@ func Infof(o interface{}, text string, args ...interface{}) {
 // debug only.  The user must have to specify -vv to see this.
 func Debugf(o interface{}, text string, args ...interface{}) {
 	if Config.LogLevel >= LogLevelDebug {
-		logPrintf(LogLevelDebug, o, text, args...)
+		LogPrintf(LogLevelDebug, o, text, args...)
 	}
 }
 
-// fnName returns the name of the calling +2 function
-func fnName() string {
-	pc, _, _, ok := runtime.Caller(2)
-	name := "*Unknown*"
-	if ok {
-		name = runtime.FuncForPC(pc).Name()
-		dot := strings.LastIndex(name, ".")
-		if dot >= 0 {
-			name = name[dot+1:]
-		}
-	}
-	return name
-}
-
-// Trace debugs the entry and exit of the calling function
-//
-// It is designed to be used in a defer statement so it returns a
-// function that logs the exit parameters.
-//
-// Any pointers in the exit function will be dereferenced
-func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
-	if Config.LogLevel < LogLevelDebug {
-		return func(format string, a ...interface{}) {}
-	}
-	name := fnName()
-	logPrintf(LogLevelDebug, o, name+": "+format, a...)
-	return func(format string, a ...interface{}) {
-		for i := range a {
-			// read the values of the pointed to items
-			typ := reflect.TypeOf(a[i])
-			if typ.Kind() == reflect.Ptr {
-				value := reflect.ValueOf(a[i])
-				if value.IsNil() {
-					a[i] = nil
-				} else {
-					pointedToValue := reflect.Indirect(value)
-					a[i] = pointedToValue.Interface()
-				}
-			}
-		}
-		logPrintf(LogLevelDebug, o, ">"+name+": "+format, a...)
-	}
-}
-
-// InitLogging start the logging as per the command line flags
-func InitLogging() {
-	// Log file output
-	if *logFile != "" {
-		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
-		if err != nil {
-			log.Fatalf("Failed to open log file: %v", err)
-		}
-		_, err = f.Seek(0, os.SEEK_END)
-		if err != nil {
-			Errorf(nil, "Failed to seek log file to end: %v", err)
-		}
-		log.SetOutput(f)
-		redirectStderr(f)
-	}
-
-	// Syslog output
-	if *useSyslog {
-		if *logFile != "" {
-			log.Fatalf("Can't use --syslog and --log-file together")
-		}
-		startSysLog()
+// LogDirName returns an object to log against for a directory: the
+// directory name, or the Fs itself for the root (which would normally be "")
+func LogDirName(f Fs, dir string) interface{} {
+	if dir != "" {
+		return dir
 	}
+	return f
 }
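With LogPrint exported and the LogDirName helper added, code outside this file can pick a sensible object to log against. A minimal usage sketch (logListing is hypothetical, not part of this patch; it assumes the usual github.com/ncw/rclone/fs import):

```go
// logListing logs about a directory listing, falling back to the Fs
// itself when dir is the root "" so the log line is never blank.
func logListing(f fs.Fs, dir string, n int) {
	fs.Debugf(fs.LogDirName(f, dir), "listed %d entries", n)
}
```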
diff --git a/fs/log/log.go b/fs/log/log.go
new file mode 100644
index 000000000..0e7908e8f
--- /dev/null
+++ b/fs/log/log.go
@@ -0,0 +1,89 @@
+// Package log provides logging for rclone
+package log
+
+import (
+	"log"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
+)
+
+// Flags
+var (
+	logFile        = flags.StringP("log-file", "", "", "Log everything to this file")
+	useSyslog      = flags.BoolP("syslog", "", false, "Use Syslog for logging")
+	syslogFacility = flags.StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
+)
+
+// fnName returns the name of the function two levels up the call stack
+func fnName() string {
+	pc, _, _, ok := runtime.Caller(2)
+	name := "*Unknown*"
+	if ok {
+		name = runtime.FuncForPC(pc).Name()
+		dot := strings.LastIndex(name, ".")
+		if dot >= 0 {
+			name = name[dot+1:]
+		}
+	}
+	return name
+}
+
+// Trace logs the entry and exit of the calling function at debug level
+//
+// It is designed to be used in a defer statement so it returns a
+// function that logs the exit parameters.
+//
+// Any pointers in the exit function will be dereferenced
+func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
+	if fs.Config.LogLevel < fs.LogLevelDebug {
+		return func(format string, a ...interface{}) {}
+	}
+	name := fnName()
+	fs.LogPrintf(fs.LogLevelDebug, o, name+": "+format, a...)
+	return func(format string, a ...interface{}) {
+		for i := range a {
+			// read the values of the pointed to items
+			typ := reflect.TypeOf(a[i])
+			if typ.Kind() == reflect.Ptr {
+				value := reflect.ValueOf(a[i])
+				if value.IsNil() {
+					a[i] = nil
+				} else {
+					pointedToValue := reflect.Indirect(value)
+					a[i] = pointedToValue.Interface()
+				}
+			}
+		}
+		fs.LogPrintf(fs.LogLevelDebug, o, ">"+name+": "+format, a...)
+	}
+}
+
+// InitLogging starts the logging as per the command line flags
+func InitLogging() {
+	// Log file output
+	if *logFile != "" {
+		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
+		if err != nil {
+			log.Fatalf("Failed to open log file: %v", err)
+		}
+		_, err = f.Seek(0, os.SEEK_END)
+		if err != nil {
+			fs.Errorf(nil, "Failed to seek log file to end: %v", err)
+		}
+		log.SetOutput(f)
+		redirectStderr(f)
+	}
+
+	// Syslog output
+	if *useSyslog {
+		if *logFile != "" {
+			log.Fatalf("Can't use --syslog and --log-file together")
+		}
+		startSysLog()
+	}
+}
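Trace is meant to be used in a single defer line: evaluating log.Trace logs the entry immediately, and the function it returns logs the exit, with any pointer arguments dereferenced so the final values appear. A hedged sketch of the pattern (openFile is hypothetical):

```go
// openFile demonstrates the defer pattern: entry is logged when the
// defer statement evaluates log.Trace, and exit is logged with the
// final (dereferenced) values of size and err.
func openFile(o interface{}, name string) (size int64, err error) {
	defer log.Trace(o, "name=%q", name)("size=%d, err=%v", &size, &err)
	// ... the real work sets size and err ...
	return size, err
}
```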
diff --git a/fs/redirect_stderr.go b/fs/log/redirect_stderr.go
similarity index 67%
rename from fs/redirect_stderr.go
rename to fs/log/redirect_stderr.go
index 3a2425c97..72ac2f026 100644
--- a/fs/redirect_stderr.go
+++ b/fs/log/redirect_stderr.go
@@ -2,11 +2,15 @@
 
 // +build !windows,!darwin,!dragonfly,!freebsd,!linux,!nacl,!netbsd,!openbsd
 
-package fs
+package log
 
-import "os"
+import (
+	"os"
+
+	"github.com/ncw/rclone/fs"
+)
 
 // redirectStderr to the file passed in
 func redirectStderr(f *os.File) {
-	Errorf(nil, "Can't redirect stderr to file")
+	fs.Errorf(nil, "Can't redirect stderr to file")
 }
diff --git a/fs/redirect_stderr_unix.go b/fs/log/redirect_stderr_unix.go
similarity index 96%
rename from fs/redirect_stderr_unix.go
rename to fs/log/redirect_stderr_unix.go
index 5126f53aa..5c5967d9e 100644
--- a/fs/redirect_stderr_unix.go
+++ b/fs/log/redirect_stderr_unix.go
@@ -2,7 +2,7 @@
 
 // +build !windows,!solaris,!plan9
 
-package fs
+package log
 
 import (
 	"log"
diff --git a/fs/redirect_stderr_windows.go b/fs/log/redirect_stderr_windows.go
similarity index 98%
rename from fs/redirect_stderr_windows.go
rename to fs/log/redirect_stderr_windows.go
index c6e98eb1a..c800305c3 100644
--- a/fs/redirect_stderr_windows.go
+++ b/fs/log/redirect_stderr_windows.go
@@ -6,7 +6,7 @@
 
 // +build windows
 
-package fs
+package log
 
 import (
 	"log"
diff --git a/fs/syslog.go b/fs/log/syslog.go
similarity index 95%
rename from fs/syslog.go
rename to fs/log/syslog.go
index 623ce346d..17c8094f9 100644
--- a/fs/syslog.go
+++ b/fs/log/syslog.go
@@ -2,7 +2,7 @@
 
 // +build windows nacl plan9
 
-package fs
+package log
 
 import (
 	"log"
diff --git a/fs/syslog_unix.go b/fs/log/syslog_unix.go
similarity index 79%
rename from fs/syslog_unix.go
rename to fs/log/syslog_unix.go
index 13d2825a9..2491b74c2 100644
--- a/fs/syslog_unix.go
+++ b/fs/log/syslog_unix.go
@@ -2,13 +2,15 @@
 
 // +build !windows,!nacl,!plan9
 
-package fs
+package log
 
 import (
 	"log"
 	"log/syslog"
 	"os"
 	"path"
+
+	"github.com/ncw/rclone/fs"
 )
 
 var (
@@ -41,23 +43,23 @@ func startSysLog() bool {
 	}
 	log.SetFlags(0)
 	log.SetOutput(w)
-	logPrint = func(level LogLevel, text string) {
+	fs.LogPrint = func(level fs.LogLevel, text string) {
 		switch level {
-		case LogLevelEmergency:
+		case fs.LogLevelEmergency:
 			_ = w.Emerg(text)
-		case LogLevelAlert:
+		case fs.LogLevelAlert:
 			_ = w.Alert(text)
-		case LogLevelCritical:
+		case fs.LogLevelCritical:
 			_ = w.Crit(text)
-		case LogLevelError:
+		case fs.LogLevelError:
 			_ = w.Err(text)
-		case LogLevelWarning:
+		case fs.LogLevelWarning:
 			_ = w.Warning(text)
-		case LogLevelNotice:
+		case fs.LogLevelNotice:
 			_ = w.Notice(text)
-		case LogLevelInfo:
+		case fs.LogLevelInfo:
 			_ = w.Info(text)
-		case LogLevelDebug:
+		case fs.LogLevelDebug:
 			_ = w.Debug(text)
 		}
 	}
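The reassignment of fs.LogPrint above is the whole syslog integration: the fs package only exposes a hook, and this package swaps in a new sink at runtime. Any other sink can be installed the same way; a minimal sketch (installCustomSink is hypothetical):

```go
import (
	stdlog "log"

	"github.com/ncw/rclone/fs"
)

// installCustomSink routes all rclone log output through the standard
// library logger with the level prefixed, mirroring startSysLog above.
func installCustomSink() {
	fs.LogPrint = func(level fs.LogLevel, text string) {
		stdlog.Printf("[%v] %s", level, text)
	}
}
```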
diff --git a/fs/log_test.go b/fs/log_test.go
new file mode 100644
index 000000000..4e6d23f95
--- /dev/null
+++ b/fs/log_test.go
@@ -0,0 +1,6 @@
+package fs
+
+import "github.com/spf13/pflag"
+
+// Check it satisfies the interface
+var _ pflag.Value = (*LogLevel)(nil)
diff --git a/fs/march.go b/fs/march/march.go
similarity index 76%
rename from fs/march.go
rename to fs/march/march.go
index 4113e07a6..2ab2974aa 100644
--- a/fs/march.go
+++ b/fs/march/march.go
@@ -1,4 +1,5 @@
-package fs
+// Package march traverses two directories in lock step
+package march
 
 import (
 	"path"
@@ -6,37 +7,42 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/list"
+	"github.com/ncw/rclone/fs/walk"
 	"golang.org/x/net/context"
 	"golang.org/x/text/unicode/norm"
 )
 
-// march traverses two Fs simultaneously, calling walker for each match
-type march struct {
+// March holds the data used to traverse two Fs simultaneously,
+// calling callback for each match
+type March struct {
 	// parameters
 	ctx      context.Context
-	fdst     Fs
-	fsrc     Fs
+	fdst     fs.Fs
+	fsrc     fs.Fs
 	dir      string
-	callback marcher
+	callback Marcher
 	// internal state
 	srcListDir listDirFn // function to call to list a directory in the src
 	dstListDir listDirFn // function to call to list a directory in the dst
 	transforms []matchTransformFn
 }
 
-// marcher is called on each match
-type marcher interface {
+// Marcher is called on each match
+type Marcher interface {
 	// SrcOnly is called for a DirEntry found only in the source
-	SrcOnly(src DirEntry) (recurse bool)
+	SrcOnly(src fs.DirEntry) (recurse bool)
 	// DstOnly is called for a DirEntry found only in the destination
-	DstOnly(dst DirEntry) (recurse bool)
+	DstOnly(dst fs.DirEntry) (recurse bool)
 	// Match is called for a DirEntry found both in the source and destination
-	Match(dst, src DirEntry) (recurse bool)
+	Match(dst, src fs.DirEntry) (recurse bool)
 }
 
-// newMarch sets up a march over fsrc, and fdst calling back callback for each match
-func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher) *march {
-	m := &march{
+// New sets up a march over fsrc and fdst, calling back callback for each match
+func New(ctx context.Context, fdst, fsrc fs.Fs, dir string, callback Marcher) *March {
+	m := &March{
 		ctx:      ctx,
 		fdst:     fdst,
 		fsrc:     fsrc,
@@ -44,7 +50,7 @@ func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher)
 		callback: callback,
 	}
 	m.srcListDir = m.makeListDir(fsrc, false)
-	m.dstListDir = m.makeListDir(fdst, Config.Filter.DeleteExcluded)
+	m.dstListDir = m.makeListDir(fdst, filter.Active.Opt.DeleteExcluded)
 	// Now create the matching transform
 	// ..normalise the UTF8 first
 	m.transforms = append(m.transforms, norm.NFC.String)
@@ -61,26 +67,26 @@ func newMarch(ctx context.Context, fdst, fsrc Fs, dir string, callback marcher)
 }
 
 // list a directory into entries, err
-type listDirFn func(dir string) (entries DirEntries, err error)
+type listDirFn func(dir string) (entries fs.DirEntries, err error)
 
 // makeListDir makes a listing function for the given fs and includeAll flags
-func (m *march) makeListDir(f Fs, includeAll bool) listDirFn {
-	if !Config.UseListR || f.Features().ListR == nil {
-		return func(dir string) (entries DirEntries, err error) {
-			return ListDirSorted(f, includeAll, dir)
+func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
+	if !fs.Config.UseListR || f.Features().ListR == nil {
+		return func(dir string) (entries fs.DirEntries, err error) {
+			return list.DirSorted(f, includeAll, dir)
 		}
 	}
 	var (
 		mu      sync.Mutex
 		started bool
-		dirs    DirTree
+		dirs    walk.DirTree
 		dirsErr error
 	)
-	return func(dir string) (entries DirEntries, err error) {
+	return func(dir string) (entries fs.DirEntries, err error) {
 		mu.Lock()
 		defer mu.Unlock()
 		if !started {
-			dirs, dirsErr = NewDirTree(f, m.dir, includeAll, Config.MaxDepth)
+			dirs, dirsErr = walk.NewDirTree(f, m.dir, includeAll, fs.Config.MaxDepth)
 			started = true
 		}
 		if dirsErr != nil {
@@ -88,7 +94,7 @@ func (m *march) makeListDir(f Fs, includeAll bool) listDirFn {
 		}
 		entries, ok := dirs[dir]
 		if !ok {
-			err = ErrorDirNotFound
+			err = fs.ErrorDirNotFound
 		} else {
 			delete(dirs, dir)
 		}
@@ -106,22 +112,22 @@ type listDirJob struct {
 	noDst     bool
 }
 
-// run starts the matching process off
-func (m *march) run() {
-	srcDepth := Config.MaxDepth
+// Run starts the matching process off
+func (m *March) Run() {
+	srcDepth := fs.Config.MaxDepth
 	if srcDepth < 0 {
-		srcDepth = MaxLevel
+		srcDepth = fs.MaxLevel
 	}
 	dstDepth := srcDepth
-	if Config.Filter.DeleteExcluded {
-		dstDepth = MaxLevel
+	if filter.Active.Opt.DeleteExcluded {
+		dstDepth = fs.MaxLevel
 	}
 
 	// Start some directory listing go routines
 	var wg sync.WaitGroup         // sync closing of go routines
 	var traversing sync.WaitGroup // running directory traversals
-	in := make(chan listDirJob, Config.Checkers)
-	for i := 0; i < Config.Checkers; i++ {
+	in := make(chan listDirJob, fs.Config.Checkers)
+	for i := 0; i < fs.Config.Checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -164,7 +170,7 @@ func (m *march) run() {
 }
 
 // Check to see if the context has been cancelled
-func (m *march) aborting() bool {
+func (m *March) aborting() bool {
 	select {
 	case <-m.ctx.Done():
 		return true
@@ -175,7 +181,7 @@ func (m *march) aborting() bool {
 
 // matchEntry is an entry plus transformed name
 type matchEntry struct {
-	entry DirEntry
+	entry fs.DirEntry
 	leaf  string
 	name  string
 }
@@ -215,7 +221,7 @@ func (es matchEntries) sort() {
 }
 
 // make matchEntries from the DirEntries passed in, applying the transforms
-func newMatchEntries(entries DirEntries, transforms []matchTransformFn) matchEntries {
+func newMatchEntries(entries fs.DirEntries, transforms []matchTransformFn) matchEntries {
 	es := make(matchEntries, len(entries))
 	for i := range es {
 		es[i].entry = entries[i]
@@ -232,7 +238,7 @@ func newMatchEntries(entries DirEntries, transforms []matchTransformFn) matchEnt
 
 // matchPair is a matched pair of direntries returned by matchListings
 type matchPair struct {
-	src, dst DirEntry
+	src, dst fs.DirEntry
 }
 
 // matchTransformFn converts a name into a form which is used for
@@ -247,11 +253,11 @@ type matchTransformFn func(name string) string
 // Into matches go matchPairs of src and dst which have the same name
 //
 // This checks for duplicates and checks the list is sorted.
-func matchListings(srcListEntries, dstListEntries DirEntries, transforms []matchTransformFn) (srcOnly DirEntries, dstOnly DirEntries, matches []matchPair) {
+func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []matchTransformFn) (srcOnly fs.DirEntries, dstOnly fs.DirEntries, matches []matchPair) {
 	srcList := newMatchEntries(srcListEntries, transforms)
 	dstList := newMatchEntries(dstListEntries, transforms)
 	for iSrc, iDst := 0, 0; ; iSrc, iDst = iSrc+1, iDst+1 {
-		var src, dst DirEntry
+		var src, dst fs.DirEntry
 		var srcName, dstName string
 		if iSrc < len(srcList) {
 			src = srcList[iSrc].entry
@@ -267,7 +273,7 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
 		if src != nil && iSrc > 0 {
 			prev := srcList[iSrc-1].name
 			if srcName == prev {
-				Logf(src, "Duplicate %s found in source - ignoring", DirEntryType(src))
+				fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src))
 				iDst-- // ignore the src and retry the dst
 				continue
 			} else if srcName < prev {
@@ -278,7 +284,7 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
 		if dst != nil && iDst > 0 {
 			prev := dstList[iDst-1].name
 			if dstName == prev {
-				Logf(dst, "Duplicate %s found in destination - ignoring", DirEntryType(dst))
+				fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst))
 				iSrc-- // ignore the dst and retry the src
 				continue
 			} else if dstName < prev {
@@ -315,9 +321,9 @@ func matchListings(srcListEntries, dstListEntries DirEntries, transforms []match
 // more jobs
 //
 // returns errors using processError
-func (m *march) processJob(job listDirJob) (jobs []listDirJob) {
+func (m *March) processJob(job listDirJob) (jobs []listDirJob) {
 	var (
-		srcList, dstList       DirEntries
+		srcList, dstList       fs.DirEntries
 		srcListErr, dstListErr error
 		wg                     sync.WaitGroup
 	)
@@ -341,15 +347,15 @@ func (m *march) processJob(job listDirJob) (jobs []listDirJob) {
 	// Wait for listings to complete and report errors
 	wg.Wait()
 	if srcListErr != nil {
-		Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
-		Stats.Error(srcListErr)
+		fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
+		fs.CountError(srcListErr)
 		return nil
 	}
-	if dstListErr == ErrorDirNotFound {
+	if dstListErr == fs.ErrorDirNotFound {
 		// Copy the stuff anyway
 	} else if dstListErr != nil {
-		Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
-		Stats.Error(dstListErr)
+		fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
+		fs.CountError(dstListErr)
 		return nil
 	}
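With March and Marcher exported, any package can drive a lock-step traversal by supplying the three callbacks. Note that March invokes them from fs.Config.Checkers goroutines, so shared state needs synchronisation. A hedged sketch of a counting Marcher (illustrative only; assumes imports of sync/atomic, golang.org/x/net/context, and the fs and fs/march packages):

```go
// counter tallies what a March finds; atomics because the callbacks
// run concurrently in the directory listing goroutines.
type counter struct{ srcOnly, dstOnly, matched int32 }

func (c *counter) SrcOnly(src fs.DirEntry) bool    { atomic.AddInt32(&c.srcOnly, 1); return true }
func (c *counter) DstOnly(dst fs.DirEntry) bool    { atomic.AddInt32(&c.dstOnly, 1); return true }
func (c *counter) Match(dst, src fs.DirEntry) bool { atomic.AddInt32(&c.matched, 1); return true }

// countDiffs walks fdst and fsrc in lock step from the root,
// recursing everywhere since every callback returns true.
func countDiffs(ctx context.Context, fdst, fsrc fs.Fs) *counter {
	c := &counter{}
	march.New(ctx, fdst, fsrc, "", c).Run()
	return c
}
```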
 
diff --git a/fs/march_test.go b/fs/march/march_test.go
similarity index 72%
rename from fs/march_test.go
rename to fs/march/march_test.go
index 802d3934c..19a8af8f9 100644
--- a/fs/march_test.go
+++ b/fs/march/march_test.go
@@ -1,23 +1,25 @@
 // Internal tests for march
 
-package fs
+package march
 
 import (
 	"strings"
 	"testing"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fstest/mockobject"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestNewMatchEntries(t *testing.T) {
 	var (
-		a = mockObject("path/a")
-		A = mockObject("path/A")
-		B = mockObject("path/B")
-		c = mockObject("path/c")
+		a = mockobject.Object("path/a")
+		A = mockobject.Object("path/A")
+		B = mockobject.Object("path/B")
+		c = mockobject.Object("path/c")
 	)
 
-	es := newMatchEntries(DirEntries{a, A, B, c}, nil)
+	es := newMatchEntries(fs.DirEntries{a, A, B, c}, nil)
 	assert.Equal(t, es, matchEntries{
 		{name: "A", leaf: "A", entry: A},
 		{name: "B", leaf: "B", entry: B},
@@ -25,7 +27,7 @@ func TestNewMatchEntries(t *testing.T) {
 		{name: "c", leaf: "c", entry: c},
 	})
 
-	es = newMatchEntries(DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
+	es = newMatchEntries(fs.DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
 	assert.Equal(t, es, matchEntries{
 		{name: "a", leaf: "A", entry: A},
 		{name: "a", leaf: "a", entry: a},
@@ -36,45 +38,45 @@ func TestNewMatchEntries(t *testing.T) {
 
 func TestMatchListings(t *testing.T) {
 	var (
-		a = mockObject("a")
-		A = mockObject("A")
-		b = mockObject("b")
-		c = mockObject("c")
-		d = mockObject("d")
+		a = mockobject.Object("a")
+		A = mockobject.Object("A")
+		b = mockobject.Object("b")
+		c = mockobject.Object("c")
+		d = mockobject.Object("d")
 	)
 
 	for _, test := range []struct {
 		what       string
-		input      DirEntries // pairs of input src, dst
-		srcOnly    DirEntries
-		dstOnly    DirEntries
+		input      fs.DirEntries // pairs of input src, dst
+		srcOnly    fs.DirEntries
+		dstOnly    fs.DirEntries
 		matches    []matchPair // pairs of output
 		transforms []matchTransformFn
 	}{
 		{
 			what: "only src or dst",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, nil,
 				b, nil,
 				c, nil,
 				d, nil,
 			},
-			srcOnly: DirEntries{
+			srcOnly: fs.DirEntries{
 				a, b, c, d,
 			},
 		},
 		{
 			what: "typical sync #1",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, nil,
 				b, b,
 				nil, c,
 				nil, d,
 			},
-			srcOnly: DirEntries{
+			srcOnly: fs.DirEntries{
 				a,
 			},
-			dstOnly: DirEntries{
+			dstOnly: fs.DirEntries{
 				c, d,
 			},
 			matches: []matchPair{
@@ -83,13 +85,13 @@ func TestMatchListings(t *testing.T) {
 		},
 		{
 			what: "typical sync #2",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, a,
 				b, b,
 				nil, c,
 				d, d,
 			},
-			dstOnly: DirEntries{
+			dstOnly: fs.DirEntries{
 				c,
 			},
 			matches: []matchPair{
@@ -100,7 +102,7 @@ func TestMatchListings(t *testing.T) {
 		},
 		{
 			what: "One duplicate",
-			input: DirEntries{
+			input: fs.DirEntries{
 				A, A,
 				a, a,
 				a, nil,
@@ -114,7 +116,7 @@ func TestMatchListings(t *testing.T) {
 		},
 		{
 			what: "Two duplicates",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, a,
 				a, a,
 				a, nil,
@@ -125,7 +127,7 @@ func TestMatchListings(t *testing.T) {
 		},
 		{
 			what: "Case insensitive duplicate - no transform",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, a,
 				A, A,
 			},
@@ -136,7 +138,7 @@ func TestMatchListings(t *testing.T) {
 		},
 		{
 			what: "Case insensitive duplicate - transform to lower case",
-			input: DirEntries{
+			input: fs.DirEntries{
 				a, a,
 				A, A,
 			},
@@ -146,7 +148,7 @@ func TestMatchListings(t *testing.T) {
 			transforms: []matchTransformFn{strings.ToLower},
 		},
 	} {
-		var srcList, dstList DirEntries
+		var srcList, dstList fs.DirEntries
 		for i := 0; i < len(test.input); i += 2 {
 			src, dst := test.input[i], test.input[i+1]
 			if src != nil {
diff --git a/fs/mimetype.go b/fs/mimetype.go
new file mode 100644
index 000000000..22cfb8604
--- /dev/null
+++ b/fs/mimetype.go
@@ -0,0 +1,30 @@
+package fs
+
+import (
+	"mime"
+	"path"
+	"strings"
+)
+
+// MimeTypeFromName returns a guess at the mime type from the name
+func MimeTypeFromName(remote string) (mimeType string) {
+	mimeType = mime.TypeByExtension(path.Ext(remote))
+	if !strings.ContainsRune(mimeType, '/') {
+		mimeType = "application/octet-stream"
+	}
+	return mimeType
+}
+
+// MimeType returns the MimeType from the object, either by calling
+// the MimeTyper interface or using MimeTypeFromName
+func MimeType(o ObjectInfo) (mimeType string) {
+	// Read the MimeType from the optional interface if available
+	if do, ok := o.(MimeTyper); ok {
+		mimeType = do.MimeType()
+		// Debugf(o, "Read MimeType as %q", mimeType)
+		if mimeType != "" {
+			return mimeType
+		}
+	}
+	return MimeTypeFromName(o.Remote())
+}
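MimeTypeFromName is a pure extension guess with an octet-stream fallback, while MimeType asks the object's optional MimeTyper interface first and only then guesses from the name. Typical results (from the standard mime package's built-in table):

```go
fs.MimeTypeFromName("photo.jpg") // "image/jpeg"
fs.MimeTypeFromName("README")    // "application/octet-stream" - no known extension
```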
diff --git a/fs/object.go b/fs/object/object.go
similarity index 78%
rename from fs/object.go
rename to fs/object/object.go
index 18c3d8f39..c4ac9b7fd 100644
--- a/fs/object.go
+++ b/fs/object/object.go
@@ -1,4 +1,5 @@
-package fs
+// Package object defines some useful Objects
+package object
 
 import (
 	"bytes"
@@ -6,12 +7,15 @@ import (
 	"io"
 	"io/ioutil"
 	"time"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
 )
 
 // NewStaticObjectInfo returns a static ObjectInfo
 // If hashes is nil and fs is not nil, the hash map will be replaced with
 // empty hashes of the types supported by the fs.
-func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[HashType]string, fs Info) ObjectInfo {
+func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo {
 	info := &staticObjectInfo{
 		remote:   remote,
 		modTime:  modTime,
@@ -22,7 +26,7 @@ func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable
 	}
 	if fs != nil && hashes == nil {
 		set := fs.Hashes().Array()
-		info.hashes = make(map[HashType]string)
+		info.hashes = make(map[hash.Type]string)
 		for _, ht := range set {
 			info.hashes[ht] = ""
 		}
@@ -35,24 +39,24 @@ type staticObjectInfo struct {
 	modTime  time.Time
 	size     int64
 	storable bool
-	hashes   map[HashType]string
-	fs       Info
+	hashes   map[hash.Type]string
+	fs       fs.Info
 }
 
-func (i *staticObjectInfo) Fs() Info           { return i.fs }
+func (i *staticObjectInfo) Fs() fs.Info        { return i.fs }
 func (i *staticObjectInfo) Remote() string     { return i.remote }
 func (i *staticObjectInfo) String() string     { return i.remote }
 func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
 func (i *staticObjectInfo) Size() int64        { return i.size }
 func (i *staticObjectInfo) Storable() bool     { return i.storable }
-func (i *staticObjectInfo) Hash(h HashType) (string, error) {
+func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
 	if len(i.hashes) == 0 {
-		return "", ErrHashUnsupported
+		return "", hash.ErrHashUnsupported
 	}
 	if hash, ok := i.hashes[h]; ok {
 		return hash, nil
 	}
-	return "", ErrHashUnsupported
+	return "", hash.ErrHashUnsupported
 }
 
 // MemoryFs is an in memory Fs, it only supports FsInfo and Put
@@ -74,10 +78,10 @@ func (memoryFs) String() string { return "memory" }
 func (memoryFs) Precision() time.Duration { return time.Nanosecond }
 
 // Returns the supported hash types of the filesystem
-func (memoryFs) Hashes() HashSet { return SupportedHashes }
+func (memoryFs) Hashes() hash.Set { return hash.SupportedHashes }
 
 // Features returns the optional features of this Fs
-func (memoryFs) Features() *Features { return &Features{} }
+func (memoryFs) Features() *fs.Features { return &fs.Features{} }
 
 // List the objects and directories in dir into entries.  The
 // entries can be returned in any order but should be for a
@@ -88,14 +92,14 @@ func (memoryFs) Features() *Features { return &Features{} }
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (memoryFs) List(dir string) (entries DirEntries, err error) {
+func (memoryFs) List(dir string) (entries fs.DirEntries, err error) {
 	return nil, nil
 }
 
 // NewObject finds the Object at remote.  If it can't be found
 // it returns the error ErrorObjectNotFound.
-func (memoryFs) NewObject(remote string) (Object, error) {
-	return nil, ErrorObjectNotFound
+func (memoryFs) NewObject(remote string) (fs.Object, error) {
+	return nil, fs.ErrorObjectNotFound
 }
 
 // Put in to the remote path with the modTime given of the given size
@@ -103,7 +107,7 @@ func (memoryFs) NewObject(remote string) (Object, error) {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (memoryFs) Put(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) {
+func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	o := NewMemoryObject(src.Remote(), src.ModTime(), nil)
 	return o, o.Update(in, src, options...)
 }
@@ -119,10 +123,10 @@ func (memoryFs) Mkdir(dir string) error {
 //
 // Return an error if it doesn't exist or isn't empty
 func (memoryFs) Rmdir(dir string) error {
-	return ErrorDirNotFound
+	return fs.ErrorDirNotFound
 }
 
-var _ Fs = MemoryFs
+var _ fs.Fs = MemoryFs
 
 // MemoryObject is an in memory object
 type MemoryObject struct {
@@ -146,7 +150,7 @@ func (o *MemoryObject) Content() []byte {
 }
 
 // Fs returns read only access to the Fs that this object is part of
-func (o *MemoryObject) Fs() Info {
+func (o *MemoryObject) Fs() fs.Info {
 	return MemoryFs
 }
 
@@ -176,8 +180,8 @@ func (o *MemoryObject) Storable() bool {
 }
 
 // Hash returns the requested hash of the contents
-func (o *MemoryObject) Hash(h HashType) (string, error) {
-	hash, err := NewMultiHasherTypes(HashSet(h))
+func (o *MemoryObject) Hash(h hash.Type) (string, error) {
+	hash, err := hash.NewMultiHasherTypes(hash.Set(h))
 	if err != nil {
 		return "", err
 	}
@@ -195,17 +199,17 @@ func (o *MemoryObject) SetModTime(modTime time.Time) error {
 }
 
 // Open opens the file for read.  Call Close() on the returned io.ReadCloser
-func (o *MemoryObject) Open(options ...OpenOption) (io.ReadCloser, error) {
+func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	content := o.content
 	for _, option := range options {
 		switch x := option.(type) {
-		case *RangeOption:
+		case *fs.RangeOption:
 			content = o.content[x.Start:x.End]
-		case *SeekOption:
+		case *fs.SeekOption:
 			content = o.content[x.Offset:]
 		default:
 			if option.Mandatory() {
-				Logf(o, "Unsupported mandatory option: %v", option)
+				fs.Logf(o, "Unsupported mandatory option: %v", option)
 			}
 		}
 	}
@@ -215,7 +219,7 @@ func (o *MemoryObject) Open(options ...OpenOption) (io.ReadCloser, error) {
 // Update in to the object with the modTime given of the given size
 //
 // This re-uses the internal buffer if at all possible.
-func (o *MemoryObject) Update(in io.Reader, src ObjectInfo, options ...OpenOption) (err error) {
+func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	size := src.Size()
 	if size == 0 {
 		o.content = nil
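The new object package gives tests and backends cheap stand-ins: NewStaticObjectInfo describes an upload, and MemoryFs/MemoryObject provide real readable objects. A hedged sketch of putting a buffer into the in-memory Fs (assumes the bytes, time, fs and fs/object imports):

```go
// Describe and store a small buffer: remote name, modTime, size,
// storable, no precomputed hashes, no parent Fs.
buf := bytes.NewBufferString("hello")
src := object.NewStaticObjectInfo("dir/file.txt", time.Now(), int64(buf.Len()), true, nil, nil)
o, err := object.MemoryFs.Put(buf, src)
if err == nil {
	fs.Debugf(o, "stored %d bytes", o.Size())
}
```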
diff --git a/fs/object_test.go b/fs/object/object_test.go
similarity index 70%
rename from fs/object_test.go
rename to fs/object/object_test.go
index 852709598..56e1480f2 100644
--- a/fs/object_test.go
+++ b/fs/object/object_test.go
@@ -1,4 +1,4 @@
-package fs_test
+package object_test
 
 import (
 	"bytes"
@@ -8,54 +8,51 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/object"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestStaticObject(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-
 	now := time.Now()
 	remote := "path/to/object"
 	size := int64(1024)
 
-	o := fs.NewStaticObjectInfo(remote, now, size, true, nil, r.Flocal)
+	o := object.NewStaticObjectInfo(remote, now, size, true, nil, object.MemoryFs)
 
-	assert.Equal(t, r.Flocal, o.Fs())
+	assert.Equal(t, object.MemoryFs, o.Fs())
 	assert.Equal(t, remote, o.Remote())
 	assert.Equal(t, remote, o.String())
 	assert.Equal(t, now, o.ModTime())
 	assert.Equal(t, size, o.Size())
 	assert.Equal(t, true, o.Storable())
 
-	hash, err := o.Hash(fs.HashMD5)
+	Hash, err := o.Hash(hash.HashMD5)
 	assert.NoError(t, err)
-	assert.Equal(t, "", hash)
+	assert.Equal(t, "", Hash)
 
-	o = fs.NewStaticObjectInfo(remote, now, size, true, nil, nil)
-	_, err = o.Hash(fs.HashMD5)
-	assert.Equal(t, fs.ErrHashUnsupported, err)
+	o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil)
+	_, err = o.Hash(hash.HashMD5)
+	assert.Equal(t, hash.ErrHashUnsupported, err)
 
-	hs := map[fs.HashType]string{
-		fs.HashMD5: "potato",
+	hs := map[hash.Type]string{
+		hash.HashMD5: "potato",
 	}
-	o = fs.NewStaticObjectInfo(remote, now, size, true, hs, nil)
-	hash, err = o.Hash(fs.HashMD5)
+	o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil)
+	Hash, err = o.Hash(hash.HashMD5)
 	assert.NoError(t, err)
-	assert.Equal(t, "potato", hash)
-	_, err = o.Hash(fs.HashSHA1)
-	assert.Equal(t, fs.ErrHashUnsupported, err)
-
+	assert.Equal(t, "potato", Hash)
+	_, err = o.Hash(hash.HashSHA1)
+	assert.Equal(t, hash.ErrHashUnsupported, err)
 }
 
 func TestMemoryFs(t *testing.T) {
-	f := fs.MemoryFs
+	f := object.MemoryFs
 	assert.Equal(t, "memory", f.Name())
 	assert.Equal(t, "", f.Root())
 	assert.Equal(t, "memory", f.String())
 	assert.Equal(t, time.Nanosecond, f.Precision())
-	assert.Equal(t, fs.SupportedHashes, f.Hashes())
+	assert.Equal(t, hash.SupportedHashes, f.Hashes())
 	assert.Equal(t, &fs.Features{}, f.Features())
 
 	entries, err := f.List("")
@@ -68,10 +65,10 @@ func TestMemoryFs(t *testing.T) {
 
 	buf := bytes.NewBufferString("potato")
 	now := time.Now()
-	src := fs.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
+	src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
 	o, err = f.Put(buf, src)
 	assert.NoError(t, err)
-	hash, err := o.Hash(fs.HashSHA1)
+	hash, err := o.Hash(hash.HashSHA1)
 	assert.NoError(t, err)
 	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
 
@@ -88,23 +85,23 @@ func TestMemoryObject(t *testing.T) {
 	content := []byte("potatoXXXXXXXXXXXXX")
 	content = content[:6] // make some extra cap
 
-	o := fs.NewMemoryObject(remote, now, content)
+	o := object.NewMemoryObject(remote, now, content)
 
 	assert.Equal(t, content, o.Content())
-	assert.Equal(t, fs.MemoryFs, o.Fs())
+	assert.Equal(t, object.MemoryFs, o.Fs())
 	assert.Equal(t, remote, o.Remote())
 	assert.Equal(t, remote, o.String())
 	assert.Equal(t, now, o.ModTime())
 	assert.Equal(t, int64(len(content)), o.Size())
 	assert.Equal(t, true, o.Storable())
 
-	hash, err := o.Hash(fs.HashMD5)
+	Hash, err := o.Hash(hash.HashMD5)
 	assert.NoError(t, err)
-	assert.Equal(t, "8ee2027983915ec78acc45027d874316", hash)
+	assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash)
 
-	hash, err = o.Hash(fs.HashSHA1)
+	Hash, err = o.Hash(hash.HashSHA1)
 	assert.NoError(t, err)
-	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
+	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash)
 
 	newNow := now.Add(time.Minute)
 	err = o.SetModTime(newNow)
@@ -139,7 +136,7 @@ func TestMemoryObject(t *testing.T) {
 	newNow = now.Add(2 * time.Minute)
 	newContent := bytes.NewBufferString("Rutabaga")
 	assert.True(t, newContent.Len() < cap(content)) // fits within cap(content)
-	src := fs.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
+	src := object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
 	err = o.Update(newContent, src)
 	assert.NoError(t, err)
 	checkContent(o, "Rutabaga")
@@ -151,7 +148,7 @@ func TestMemoryObject(t *testing.T) {
 	newStr = newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr
 	newContent = bytes.NewBufferString(newStr)
 	assert.True(t, newContent.Len() > cap(content)) // does not fit within cap(content)
-	src = fs.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
+	src = object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
 	err = o.Update(newContent, src)
 	assert.NoError(t, err)
 	checkContent(o, newStr)
@@ -160,7 +157,7 @@ func TestMemoryObject(t *testing.T) {
 	// now try streaming
 	newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
 	newContent = bytes.NewBufferString(newStr)
-	src = fs.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
+	src = object.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
 	err = o.Update(newContent, src)
 	assert.NoError(t, err)
 	checkContent(o, newStr)
@@ -168,7 +165,7 @@ func TestMemoryObject(t *testing.T) {
 	// and zero length
 	newStr = ""
 	newContent = bytes.NewBufferString(newStr)
-	src = fs.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
+	src = object.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
 	err = o.Update(newContent, src)
 	assert.NoError(t, err)
 	checkContent(o, newStr)
diff --git a/fs/operations/listdirsorted_test.go b/fs/operations/listdirsorted_test.go
new file mode 100644
index 000000000..7c846393a
--- /dev/null
+++ b/fs/operations/listdirsorted_test.go
@@ -0,0 +1,104 @@
+package operations_test
+
+import (
+	"testing"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/list"
+	"github.com/ncw/rclone/fstest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestListDirSorted is an integration test for the code in fs/list/list.go
+// which can't be tested there due to import loops.
+func TestListDirSorted(t *testing.T) {
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+
+	filter.Active.Opt.MaxSize = 10
+	defer func() {
+		filter.Active.Opt.MaxSize = -1
+	}()
+
+	files := []fstest.Item{
+		r.WriteObject("a.txt", "hello world", t1),
+		r.WriteObject("zend.txt", "hello", t1),
+		r.WriteObject("sub dir/hello world", "hello world", t1),
+		r.WriteObject("sub dir/hello world2", "hello world", t1),
+		r.WriteObject("sub dir/ignore dir/.ignore", "", t1),
+		r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1),
+		r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1),
+	}
+	fstest.CheckItems(t, r.Fremote, files...)
+	var items fs.DirEntries
+	var err error
+
+	// Turn the DirEntry into a name, ending with a / if it is a
+	// dir
+	str := func(i int) string {
+		item := items[i]
+		name := item.Remote()
+		switch item.(type) {
+		case fs.Object:
+		case fs.Directory:
+			name += "/"
+		default:
+			t.Fatalf("Unknown type %+v", item)
+		}
+		return name
+	}
+
+	items, err = list.DirSorted(r.Fremote, true, "")
+	require.NoError(t, err)
+	require.Len(t, items, 3)
+	assert.Equal(t, "a.txt", str(0))
+	assert.Equal(t, "sub dir/", str(1))
+	assert.Equal(t, "zend.txt", str(2))
+
+	items, err = list.DirSorted(r.Fremote, false, "")
+	require.NoError(t, err)
+	require.Len(t, items, 2)
+	assert.Equal(t, "sub dir/", str(0))
+	assert.Equal(t, "zend.txt", str(1))
+
+	items, err = list.DirSorted(r.Fremote, true, "sub dir")
+	require.NoError(t, err)
+	require.Len(t, items, 4)
+	assert.Equal(t, "sub dir/hello world", str(0))
+	assert.Equal(t, "sub dir/hello world2", str(1))
+	assert.Equal(t, "sub dir/ignore dir/", str(2))
+	assert.Equal(t, "sub dir/sub sub dir/", str(3))
+
+	items, err = list.DirSorted(r.Fremote, false, "sub dir")
+	require.NoError(t, err)
+	require.Len(t, items, 2)
+	assert.Equal(t, "sub dir/ignore dir/", str(0))
+	assert.Equal(t, "sub dir/sub sub dir/", str(1))
+
+	// testing ignore file
+	filter.Active.Opt.ExcludeFile = ".ignore"
+
+	items, err = list.DirSorted(r.Fremote, false, "sub dir")
+	require.NoError(t, err)
+	require.Len(t, items, 1)
+	assert.Equal(t, "sub dir/sub sub dir/", str(0))
+
+	items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
+	require.NoError(t, err)
+	require.Len(t, items, 0)
+
+	items, err = list.DirSorted(r.Fremote, true, "sub dir/ignore dir")
+	require.NoError(t, err)
+	require.Len(t, items, 2)
+	assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
+	assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
+
+	filter.Active.Opt.ExcludeFile = ""
+	items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir")
+	require.NoError(t, err)
+	require.Len(t, items, 2)
+	assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
+	assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
+}
diff --git a/fs/operations.go b/fs/operations/operations.go
similarity index 59%
rename from fs/operations.go
rename to fs/operations/operations.go
index 1cb39fad9..b524b0b68 100644
--- a/fs/operations.go
+++ b/fs/operations/operations.go
@@ -1,6 +1,5 @@
-// Generic operations on filesystems and objects
-
-package fs
+// Package operations does generic operations on filesystems and objects
+package operations
 
 import (
 	"bytes"
@@ -8,7 +7,6 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
-	"mime"
 	"path"
 	"sort"
 	"strconv"
@@ -17,41 +15,20 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/march"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/walk"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
 	"golang.org/x/net/context"
 )
 
-// CalculateModifyWindow works out modify window for Fses passed in -
-// sets Config.ModifyWindow
-//
-// This is the largest modify window of all the fses in use, and the
-// user configured value
-func CalculateModifyWindow(fs ...Fs) {
-	for _, f := range fs {
-		if f != nil {
-			precision := f.Precision()
-			if precision > Config.ModifyWindow {
-				Config.ModifyWindow = precision
-			}
-			if precision == ModTimeNotSupported {
-				Infof(f, "Modify window not supported")
-				return
-			}
-		}
-	}
-	Infof(fs[0], "Modify window is %s", Config.ModifyWindow)
-}
-
-// HashEquals checks to see if src == dst, but ignores empty strings
-// and returns true if either is empty.
-func HashEquals(src, dst string) bool {
-	if src == "" || dst == "" {
-		return true
-	}
-	return src == dst
-}
-
 // CheckHashes checks the two files to see if they have common
 // known hash types and compares them
 //
@@ -65,36 +42,36 @@ func HashEquals(src, dst string) bool {
 // err - may return an error which will already have been logged
 //
 // If an error is returned it will return equal as false
-func CheckHashes(src ObjectInfo, dst Object) (equal bool, hash HashType, err error) {
+func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) {
 	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
-	// Debugf(nil, "Shared hashes: %v", common)
+	// fs.Debugf(nil, "Shared hashes: %v", common)
 	if common.Count() == 0 {
-		return true, HashNone, nil
+		return true, hash.HashNone, nil
 	}
-	hash = common.GetOne()
-	srcHash, err := src.Hash(hash)
+	ht = common.GetOne()
+	srcHash, err := src.Hash(ht)
 	if err != nil {
-		Stats.Error(err)
-		Errorf(src, "Failed to calculate src hash: %v", err)
-		return false, hash, err
+		fs.CountError(err)
+		fs.Errorf(src, "Failed to calculate src hash: %v", err)
+		return false, ht, err
 	}
 	if srcHash == "" {
-		return true, HashNone, nil
+		return true, hash.HashNone, nil
 	}
-	dstHash, err := dst.Hash(hash)
+	dstHash, err := dst.Hash(ht)
 	if err != nil {
-		Stats.Error(err)
-		Errorf(dst, "Failed to calculate dst hash: %v", err)
-		return false, hash, err
+		fs.CountError(err)
+		fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
+		return false, ht, err
 	}
 	if dstHash == "" {
-		return true, HashNone, nil
+		return true, hash.HashNone, nil
 	}
 	if srcHash != dstHash {
-		Debugf(src, "%v = %s (%v)", hash, srcHash, src.Fs())
-		Debugf(dst, "%v = %s (%v)", hash, dstHash, dst.Fs())
+		fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs())
+		fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs())
 	}
-	return srcHash == dstHash, hash, nil
+	return srcHash == dstHash, ht, nil
 }
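CheckHashes now reports the hash type it compared as a hash.Type. A hedged call-site sketch:

```go
// equal is false only if a common hash existed and differed, or a
// hash read failed; ht is hash.HashNone when the remotes share no type.
equal, ht, err := operations.CheckHashes(src, dst)
if err == nil && !equal {
	fs.Errorf(src, "%v differs from destination", ht)
}
```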
 
 // Equal checks to see if the src and dst objects are equal by looking at
@@ -115,19 +92,19 @@ func CheckHashes(src ObjectInfo, dst Object) (equal bool, hash HashType, err err
 //
 // Otherwise the file is considered to be not equal including if there
 // were errors reading info.
-func Equal(src ObjectInfo, dst Object) bool {
-	return equal(src, dst, Config.SizeOnly, Config.CheckSum)
+func Equal(src fs.ObjectInfo, dst fs.Object) bool {
+	return equal(src, dst, fs.Config.SizeOnly, fs.Config.CheckSum)
 }
 
-func equal(src ObjectInfo, dst Object, sizeOnly, checkSum bool) bool {
-	if !Config.IgnoreSize {
+func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
+	if !fs.Config.IgnoreSize {
 		if src.Size() != dst.Size() {
-			Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
+			fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
 			return false
 		}
 	}
 	if sizeOnly {
-		Debugf(src, "Sizes identical")
+		fs.Debugf(src, "Sizes identical")
 		return true
 	}
 
@@ -136,114 +113,91 @@ func equal(src ObjectInfo, dst Object, sizeOnly, checkSum bool) bool {
 	// If checking checksum and not modtime
 	if checkSum {
 		// Check the hash
-		same, hash, _ := CheckHashes(src, dst)
+		same, ht, _ := CheckHashes(src, dst)
 		if !same {
-			Debugf(src, "%v differ", hash)
+			fs.Debugf(src, "%v differ", ht)
 			return false
 		}
-		if hash == HashNone {
-			Debugf(src, "Size of src and dst objects identical")
+		if ht == hash.HashNone {
+			fs.Debugf(src, "Size of src and dst objects identical")
 		} else {
-			Debugf(src, "Size and %v of src and dst objects identical", hash)
+			fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
 		}
 		return true
 	}
 
 	// Sizes the same so check the mtime
-	if Config.ModifyWindow == ModTimeNotSupported {
-		Debugf(src, "Sizes identical")
+	if fs.Config.ModifyWindow == fs.ModTimeNotSupported {
+		fs.Debugf(src, "Sizes identical")
 		return true
 	}
 	srcModTime := src.ModTime()
 	dstModTime := dst.ModTime()
 	dt := dstModTime.Sub(srcModTime)
-	ModifyWindow := Config.ModifyWindow
+	ModifyWindow := fs.Config.ModifyWindow
 	if dt < ModifyWindow && dt > -ModifyWindow {
-		Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
+		fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
 		return true
 	}
 
-	Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
+	fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
 
 	// Check if the hashes are the same
-	same, hash, _ := CheckHashes(src, dst)
+	same, ht, _ := CheckHashes(src, dst)
 	if !same {
-		Debugf(src, "%v differ", hash)
+		fs.Debugf(src, "%v differ", ht)
 		return false
 	}
-	if hash == HashNone {
+	if ht == hash.HashNone {
 		// if couldn't check hash, return that they differ
 		return false
 	}
 
 	// mod time differs but hash is the same so reset the mod time if required
-	if !Config.NoUpdateModTime {
-		if Config.DryRun {
-			Logf(src, "Not updating modification time as --dry-run")
+	if !fs.Config.NoUpdateModTime {
+		if fs.Config.DryRun {
+			fs.Logf(src, "Not updating modification time as --dry-run")
 		} else {
 			// Size and hash the same but mtime different
 			// Error if objects are treated as immutable
-			if Config.Immutable {
-				Errorf(dst, "Timestamp mismatch between immutable objects")
+			if fs.Config.Immutable {
+				fs.Errorf(dst, "Timestamp mismatch between immutable objects")
 				return false
 			}
 			// Update the mtime of the dst object here
 			err := dst.SetModTime(srcModTime)
-			if err == ErrorCantSetModTime {
-				Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
+			if err == fs.ErrorCantSetModTime {
+				fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
 				return false
-			} else if err == ErrorCantSetModTimeWithoutDelete {
-				Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
+			} else if err == fs.ErrorCantSetModTimeWithoutDelete {
+				fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
 				err = dst.Remove()
 				if err != nil {
-					Errorf(dst, "failed to delete before re-upload: %v", err)
+					fs.Errorf(dst, "failed to delete before re-upload: %v", err)
 				}
 				return false
 			} else if err != nil {
-				Stats.Error(err)
-				Errorf(dst, "Failed to set modification time: %v", err)
+				fs.CountError(err)
+				fs.Errorf(dst, "Failed to set modification time: %v", err)
 			} else {
-				Infof(src, "Updated modification time in destination")
+				fs.Infof(src, "Updated modification time in destination")
 			}
 		}
 	}
 	return true
 }
 
-// MimeTypeFromName returns a guess at the mime type from the name
-func MimeTypeFromName(remote string) (mimeType string) {
-	mimeType = mime.TypeByExtension(path.Ext(remote))
-	if !strings.ContainsRune(mimeType, '/') {
-		mimeType = "application/octet-stream"
-	}
-	return mimeType
-}
-
-// MimeType returns the MimeType from the object, either by calling
-// the MimeTyper interface or using MimeTypeFromName
-func MimeType(o ObjectInfo) (mimeType string) {
-	// Read the MimeType from the optional interface if available
-	if do, ok := o.(MimeTyper); ok {
-		mimeType = do.MimeType()
-		// Debugf(o, "Read MimeType as %q", mimeType)
-		if mimeType != "" {
-			return mimeType
-		}
-	}
-	return MimeTypeFromName(o.Remote())
-}
-
 // Used to remove a failed copy
 //
 // Returns whether the file was successfully removed or not
-func removeFailedCopy(dst Object) bool {
+func removeFailedCopy(dst fs.Object) bool {
 	if dst == nil {
 		return false
 	}
-	Infof(dst, "Removing failed copy")
+	fs.Infof(dst, "Removing failed copy")
 	removeErr := dst.Remove()
 	if removeErr != nil {
-		Infof(dst, "Failed to remove failed copy: %s", removeErr)
+		fs.Infof(dst, "Failed to remove failed copy: %s", removeErr)
 		return false
 	}
 	return true
@@ -251,7 +205,7 @@ func removeFailedCopy(dst Object) bool {
 
 // Wrapper to override the remote for an object
 type overrideRemoteObject struct {
-	Object
+	fs.Object
 	remote string
 }
 
@@ -263,40 +217,40 @@ func (o *overrideRemoteObject) Remote() string {
 // MimeType returns the mime type of the underlying object or "" if it
 // can't be worked out
 func (o *overrideRemoteObject) MimeType() string {
-	if do, ok := o.Object.(MimeTyper); ok {
+	if do, ok := o.Object.(fs.MimeTyper); ok {
 		return do.MimeType()
 	}
 	return ""
 }
 
 // Check interface is satisfied
-var _ MimeTyper = (*overrideRemoteObject)(nil)
+var _ fs.MimeTyper = (*overrideRemoteObject)(nil)
 
 // Copy src object to dst or f if nil.  If dst is nil then it uses
 // remote as the name of the new object.
 //
 // It returns the destination object if possible.  Note that this may
 // be nil.
-func Copy(f Fs, dst Object, remote string, src Object) (newDst Object, err error) {
+func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
 	newDst = dst
-	if Config.DryRun {
-		Logf(src, "Not copying as --dry-run")
+	if fs.Config.DryRun {
+		fs.Logf(src, "Not copying as --dry-run")
 		return newDst, nil
 	}
-	maxTries := Config.LowLevelRetries
+	maxTries := fs.Config.LowLevelRetries
 	tries := 0
 	doUpdate := dst != nil
 	// work out which hash to use - limit to 1 hash in common
-	var common HashSet
-	hashType := HashNone
-	if !Config.SizeOnly {
+	var common hash.Set
+	hashType := hash.HashNone
+	if !fs.Config.SizeOnly {
 		common = src.Fs().Hashes().Overlap(f.Hashes())
 		if common.Count() > 0 {
 			hashType = common.GetOne()
-			common = HashSet(hashType)
+			common = hash.Set(hashType)
 		}
 	}
-	hashOption := &HashesOption{Hashes: common}
+	hashOption := &fs.HashesOption{Hashes: common}
 	var actionTaken string
 	for {
 		// Try server side copy first - if has optional interface and
@@ -308,17 +262,17 @@ func Copy(f Fs, dst Object, remote string, src Object) (newDst Object, err error
 				dst = newDst
 			}
 		} else {
-			err = ErrorCantCopy
+			err = fs.ErrorCantCopy
 		}
 		// If can't server side copy, do it manually
-		if err == ErrorCantCopy {
+		if err == fs.ErrorCantCopy {
 			var in0 io.ReadCloser
 			in0, err = src.Open(hashOption)
 			if err != nil {
 				err = errors.Wrap(err, "failed to open source object")
 			} else {
-				in := NewAccount(in0, src).WithBuffer() // account and buffer the transfer
-				var wrappedSrc ObjectInfo = src
+				in := accounting.NewAccount(in0, src).WithBuffer() // account and buffer the transfer
+				var wrappedSrc fs.ObjectInfo = src
 				// We try to pass the original object if possible
 				if src.Remote() != remote {
 					wrappedSrc = &overrideRemoteObject{Object: src, remote: remote}
@@ -342,24 +296,24 @@ func Copy(f Fs, dst Object, remote string, src Object) (newDst Object, err error
 			break
 		}
 		// Retry if err returned a retry error
-		if IsRetryError(err) || ShouldRetry(err) {
-			Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
+		if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
+			fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
 			continue
 		}
 		// otherwise finish
 		break
 	}
 	if err != nil {
-		Stats.Error(err)
-		Errorf(src, "Failed to copy: %v", err)
+		fs.CountError(err)
+		fs.Errorf(src, "Failed to copy: %v", err)
 		return newDst, err
 	}
 
 	// Verify sizes are the same after transfer
-	if !Config.IgnoreSize && src.Size() != dst.Size() {
+	if !fs.Config.IgnoreSize && src.Size() != dst.Size() {
 		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
-		Errorf(dst, "%v", err)
-		Stats.Error(err)
+		fs.Errorf(dst, "%v", err)
+		fs.CountError(err)
 		removeFailedCopy(dst)
 		return newDst, err
 	}
@@ -367,29 +321,29 @@ func Copy(f Fs, dst Object, remote string, src Object) (newDst Object, err error
 	// Verify hashes are the same after transfer - ignoring blank hashes
 	// TODO(klauspost): This could be extended, so we always create a hash type matching
 	// the destination, and calculate it while sending.
-	if hashType != HashNone {
+	if hashType != hash.HashNone {
 		var srcSum string
 		srcSum, err = src.Hash(hashType)
 		if err != nil {
-			Stats.Error(err)
-			Errorf(src, "Failed to read src hash: %v", err)
+			fs.CountError(err)
+			fs.Errorf(src, "Failed to read src hash: %v", err)
 		} else if srcSum != "" {
 			var dstSum string
 			dstSum, err = dst.Hash(hashType)
 			if err != nil {
-				Stats.Error(err)
-				Errorf(dst, "Failed to read hash: %v", err)
-			} else if !Config.IgnoreChecksum && !HashEquals(srcSum, dstSum) {
+				fs.CountError(err)
+				fs.Errorf(dst, "Failed to read hash: %v", err)
+			} else if !fs.Config.IgnoreChecksum && !hash.Equals(srcSum, dstSum) {
 				err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
-				Errorf(dst, "%v", err)
-				Stats.Error(err)
+				fs.Errorf(dst, "%v", err)
+				fs.CountError(err)
 				removeFailedCopy(dst)
 				return newDst, err
 			}
 		}
 	}
 
-	Infof(src, actionTaken)
+	fs.Infof(src, actionTaken)
 	return newDst, err
 }
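Copy keeps its server-side-copy-first strategy behind the same signature, now rooted in the operations package. A minimal call sketch (fdst, dst and src are assumed to be in scope):

```go
// Copy src into fdst under its own name; dst may be nil for a fresh
// copy, and the returned newDst may be nil even on success.
newDst, err := operations.Copy(fdst, dst, src.Remote(), src)
if err != nil {
	fs.Errorf(src, "copy failed: %v", err)
}
```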
 
@@ -398,10 +352,10 @@ func Copy(f Fs, dst Object, remote string, src Object) (newDst Object, err error
 //
 // It returns the destination object if possible.  Note that this may
 // be nil.
-func Move(fdst Fs, dst Object, remote string, src Object) (newDst Object, err error) {
+func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
 	newDst = dst
-	if Config.DryRun {
-		Logf(src, "Not moving as --dry-run")
+	if fs.Config.DryRun {
+		fs.Logf(src, "Not moving as --dry-run")
 		return newDst, nil
 	}
 	// See if we have Move available
@@ -417,20 +371,20 @@ func Move(fdst Fs, dst Object, remote string, src Object) (newDst Object, err er
 		newDst, err = doMove(src, remote)
 		switch err {
 		case nil:
-			Infof(src, "Moved (server side)")
+			fs.Infof(src, "Moved (server side)")
 			return newDst, nil
-		case ErrorCantMove:
-			Debugf(src, "Can't move, switching to copy")
+		case fs.ErrorCantMove:
+			fs.Debugf(src, "Can't move, switching to copy")
 		default:
-			Stats.Error(err)
-			Errorf(src, "Couldn't move: %v", err)
+			fs.CountError(err)
+			fs.Errorf(src, "Couldn't move: %v", err)
 			return newDst, err
 		}
 	}
 	// Move not found or didn't work so copy dst <- src
 	newDst, err = Copy(fdst, dst, remote, src)
 	if err != nil {
-		Errorf(src, "Not deleting source as copy failed: %v", err)
+		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
 		return newDst, err
 	}
 	// Delete src if no error on copy
@@ -442,30 +396,30 @@ func Move(fdst Fs, dst Object, remote string, src Object) (newDst Object, err er
 //
 // Some remotes simulate rename by server-side copy and delete, so include
 // remotes that implements either Mover or Copier.
-func CanServerSideMove(fdst Fs) bool {
+func CanServerSideMove(fdst fs.Fs) bool {
 	canMove := fdst.Features().Move != nil
 	canCopy := fdst.Features().Copy != nil
 	return canMove || canCopy
 }
 
-// deleteFileWithBackupDir deletes a single file respecting --dry-run
+// DeleteFileWithBackupDir deletes a single file respecting --dry-run
 // and accumulating stats and errors.
 //
 // If backupDir is set then it moves the file to there instead of
 // deleting
-func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
-	Stats.Checking(dst.Remote())
+func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) {
+	accounting.Stats.Checking(dst.Remote())
 	action, actioned, actioning := "delete", "Deleted", "deleting"
 	if backupDir != nil {
 		action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir"
 	}
-	if Config.DryRun {
-		Logf(dst, "Not %s as --dry-run", actioning)
+	if fs.Config.DryRun {
+		fs.Logf(dst, "Not %s as --dry-run", actioning)
 	} else if backupDir != nil {
 		if !SameConfig(dst.Fs(), backupDir) {
 			err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
 		} else {
-			remoteWithSuffix := dst.Remote() + Config.Suffix
+			remoteWithSuffix := dst.Remote() + fs.Config.Suffix
 			overwritten, _ := backupDir.NewObject(remoteWithSuffix)
 			_, err = Move(backupDir, overwritten, remoteWithSuffix, dst)
 		}
@@ -473,12 +427,12 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
 		err = dst.Remove()
 	}
 	if err != nil {
-		Stats.Error(err)
-		Errorf(dst, "Couldn't %s: %v", action, err)
-	} else if !Config.DryRun {
-		Infof(dst, actioned)
+		fs.CountError(err)
+		fs.Errorf(dst, "Couldn't %s: %v", action, err)
+	} else if !fs.Config.DryRun {
+		fs.Infof(dst, actioned)
 	}
-	Stats.DoneChecking(dst.Remote())
+	accounting.Stats.DoneChecking(dst.Remote())
 	return err
 }
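
A hedged sketch of the --backup-dir flow above (illustrative, not part of the patch): the backup remote must share a config entry with the destination or the SameConfig test fails the call. The remote names are hypothetical and error handling is elided:

```go
f, _ := fs.NewFs("remote:data")
backupDir, _ := fs.NewFs("remote:backup") // same remote as f, per SameConfig
if obj, err := f.NewObject("docs/report.txt"); err == nil {
	// Moves the object to remote:backup/docs/report.txt plus
	// fs.Config.Suffix instead of removing it.
	_ = operations.DeleteFileWithBackupDir(obj, backupDir)
}
```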
 
@@ -486,31 +440,31 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
 //
 // Use DeleteFileWithBackupDir if the file should be moved into
 // --backup-dir instead of being deleted.
-func DeleteFile(dst Object) (err error) {
-	return deleteFileWithBackupDir(dst, nil)
+func DeleteFile(dst fs.Object) (err error) {
+	return DeleteFileWithBackupDir(dst, nil)
 }
 
-// deleteFilesWithBackupDir removes all the files passed in the
+// DeleteFilesWithBackupDir removes all the files passed in the
 // channel
 //
 // If backupDir is set the files will be placed into that directory
 // instead of being deleted.
-func deleteFilesWithBackupDir(toBeDeleted ObjectsChan, backupDir Fs) error {
+func DeleteFilesWithBackupDir(toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
 	var wg sync.WaitGroup
-	wg.Add(Config.Transfers)
+	wg.Add(fs.Config.Transfers)
 	var errorCount int32
-	for i := 0; i < Config.Transfers; i++ {
+	for i := 0; i < fs.Config.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for dst := range toBeDeleted {
-				err := deleteFileWithBackupDir(dst, backupDir)
+				err := DeleteFileWithBackupDir(dst, backupDir)
 				if err != nil {
 					atomic.AddInt32(&errorCount, 1)
 				}
 			}
 		}()
 	}
-	Infof(nil, "Waiting for deletions to finish")
+	fs.Infof(nil, "Waiting for deletions to finish")
 	wg.Wait()
 	if errorCount > 0 {
 		return errors.Errorf("failed to delete %d files", errorCount)
@@ -519,8 +473,8 @@ func deleteFilesWithBackupDir(toBeDeleted ObjectsChan, backupDir Fs) error {
 }
 
 // DeleteFiles removes all the files passed in the channel
-func DeleteFiles(toBeDeleted ObjectsChan) error {
-	return deleteFilesWithBackupDir(toBeDeleted, nil)
+func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
+	return DeleteFilesWithBackupDir(toBeDeleted, nil)
 }
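
DeleteFilesWithBackupDir drains the channel with fs.Config.Transfers worker goroutines and tallies failures atomically, so a caller only has to produce objects and close the channel. A sketch of the producer side (illustrative; it mirrors how Delete, later in this file, feeds the workers):

```go
toBeDeleted := make(fs.ObjectsChan, fs.Config.Transfers)
go func() {
	defer close(toBeDeleted) // the workers exit when the channel closes
	_ = operations.ListFn(f, func(o fs.Object) {
		toBeDeleted <- o
	})
}()
err := operations.DeleteFiles(toBeDeleted) // blocks until all deletions finish
```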
 
 // Read Objects into add() for the given Fs.
@@ -530,8 +484,8 @@ func DeleteFiles(toBeDeleted ObjectsChan) error {
 //
 // Each object is passed into the function provided.  If that returns
 // an error then the listing will be aborted and that error returned.
-func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (err error) {
-	return Walk(fs, "", includeAll, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+func readFilesFn(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) (err error) {
+	return walk.Walk(f, "", includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			return err
 		}
@@ -539,194 +493,25 @@ func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (er
 	})
 }
 
-// DirEntries is a slice of Object or *Dir
-type DirEntries []DirEntry
-
-// Len is part of sort.Interface.
-func (ds DirEntries) Len() int {
-	return len(ds)
-}
-
-// Swap is part of sort.Interface.
-func (ds DirEntries) Swap(i, j int) {
-	ds[i], ds[j] = ds[j], ds[i]
-}
-
-// Less is part of sort.Interface.
-func (ds DirEntries) Less(i, j int) bool {
-	return ds[i].Remote() < ds[j].Remote()
-}
-
-// ForObject runs the function supplied on every object in the entries
-func (ds DirEntries) ForObject(fn func(o Object)) {
-	for _, entry := range ds {
-		o, ok := entry.(Object)
-		if ok {
-			fn(o)
-		}
-	}
-}
-
-// ForObjectError runs the function supplied on every object in the entries
-func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
-	for _, entry := range ds {
-		o, ok := entry.(Object)
-		if ok {
-			err := fn(o)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// ForDir runs the function supplied on every Directory in the entries
-func (ds DirEntries) ForDir(fn func(dir Directory)) {
-	for _, entry := range ds {
-		dir, ok := entry.(Directory)
-		if ok {
-			fn(dir)
-		}
-	}
-}
-
-// ForDirError runs the function supplied on every Directory in the entries
-func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
-	for _, entry := range ds {
-		dir, ok := entry.(Directory)
-		if ok {
-			err := fn(dir)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// DirEntryType returns a string description of the DirEntry, either
-// "object", "directory" or "unknown type XXX"
-func DirEntryType(d DirEntry) string {
-	switch d.(type) {
-	case Object:
-		return "object"
-	case Directory:
-		return "directory"
-	}
-	return fmt.Sprintf("unknown type %T", d)
-}
-
-// ListDirSorted reads Object and *Dir into entries for the given Fs.
-//
-// dir is the start directory, "" for root
-//
-// If includeAll is specified all files will be added, otherwise only
-// files and directories passing the filter will be added.
-//
-// Files will be returned in sorted order
-func ListDirSorted(fs Fs, includeAll bool, dir string) (entries DirEntries, err error) {
-	// Get unfiltered entries from the fs
-	entries, err = fs.List(dir)
-	if err != nil {
-		return nil, err
-	}
-	// This should happen only if exclude files lives in the
-	// starting directory, otherwise ListDirSorted should not be
-	// called.
-	if !includeAll && Config.Filter.ListContainsExcludeFile(entries) {
-		Debugf(dir, "Excluded from sync (and deletion)")
-		return nil, nil
-	}
-	return filterAndSortDir(entries, includeAll, dir, Config.Filter.IncludeObject, Config.Filter.IncludeDirectory(fs))
-}
-
-// filter (if required) and check the entries, then sort them
-func filterAndSortDir(entries DirEntries, includeAll bool, dir string,
-	IncludeObject func(o Object) bool,
-	IncludeDirectory func(remote string) (bool, error)) (newEntries DirEntries, err error) {
-	newEntries = entries[:0] // in place filter
-	prefix := ""
-	if dir != "" {
-		prefix = dir + "/"
-	}
-	for _, entry := range entries {
-		ok := true
-		// check includes and types
-		switch x := entry.(type) {
-		case Object:
-			// Make sure we don't delete excluded files if not required
-			if !includeAll && !IncludeObject(x) {
-				ok = false
-				Debugf(x, "Excluded from sync (and deletion)")
-			}
-		case Directory:
-			if !includeAll {
-				include, err := IncludeDirectory(x.Remote())
-				if err != nil {
-					return nil, err
-				}
-				if !include {
-					ok = false
-					Debugf(x, "Excluded from sync (and deletion)")
-				}
-			}
-		default:
-			return nil, errors.Errorf("unknown object type %T", entry)
-		}
-		// check remote name belongs in this directry
-		remote := entry.Remote()
-		switch {
-		case !ok:
-			// ignore
-		case !strings.HasPrefix(remote, prefix):
-			ok = false
-			Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
-		case remote == prefix:
-			ok = false
-			Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
-		case strings.ContainsRune(remote[len(prefix):], '/'):
-			ok = false
-			Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
-		default:
-			// ok
-		}
-		if ok {
-			newEntries = append(newEntries, entry)
-		}
-	}
-	entries = newEntries
-
-	// Sort the directory entries by Remote
-	//
-	// We use a stable sort here just in case there are
-	// duplicates. Assuming the remote delivers the entries in a
-	// consistent order, this will give the best user experience
-	// in syncing as it will use the first entry for the sync
-	// comparison.
-	sort.Stable(entries)
-	return entries, nil
-}
-
 // SameConfig returns true if fdst and fsrc are using the same config
 // file entry
-func SameConfig(fdst, fsrc Info) bool {
+func SameConfig(fdst, fsrc fs.Info) bool {
 	return fdst.Name() == fsrc.Name()
 }
 
 // Same returns true if fdst and fsrc point to the same underlying Fs
-func Same(fdst, fsrc Info) bool {
+func Same(fdst, fsrc fs.Info) bool {
 	return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root()
 }
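
These predicates form a hierarchy: SameConfig compares only the config entry name, Same additionally requires identical roots, and Overlapping (next) checks whether one root contains the other. An illustration with hypothetical remotes, not part of the patch:

```go
a, _ := fs.NewFs("gdrive:photos")
b, _ := fs.NewFs("gdrive:photos/2017")
c, _ := fs.NewFs("gdrive:docs")

operations.SameConfig(a, b)  // true:  both use the "gdrive" config entry
operations.Same(a, b)        // false: the roots differ
operations.Overlapping(a, b) // true:  "photos/" is a prefix of "photos/2017/"
operations.Overlapping(a, c) // false: disjoint roots
```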
 
 // Overlapping returns true if fdst and fsrc point to the same
 // underlying Fs and they overlap.
-func Overlapping(fdst, fsrc Info) bool {
+func Overlapping(fdst, fsrc fs.Info) bool {
 	if !SameConfig(fdst, fsrc) {
 		return false
 	}
 	// Return the Root with a trailing / if not empty
-	fixedRoot := func(f Info) string {
+	fixedRoot := func(f fs.Info) string {
 		s := strings.Trim(f.Root(), "/")
 		if s != "" {
 			s += "/"
@@ -742,31 +527,31 @@ func Overlapping(fdst, fsrc Info) bool {
 //
 // it returns true if differences were found
 // it also returns whether it couldn't be hashed
-func checkIdentical(dst, src Object) (differ bool, noHash bool) {
-	same, hash, err := CheckHashes(src, dst)
+func checkIdentical(dst, src fs.Object) (differ bool, noHash bool) {
+	same, ht, err := CheckHashes(src, dst)
 	if err != nil {
 		// CheckHashes will log and count errors
 		return true, false
 	}
-	if hash == HashNone {
+	if ht == hash.HashNone {
 		return false, true
 	}
 	if !same {
-		err = errors.Errorf("%v differ", hash)
-		Errorf(src, "%v", err)
-		Stats.Error(err)
+		err = errors.Errorf("%v differ", ht)
+		fs.Errorf(src, "%v", err)
+		fs.CountError(err)
 		return true, false
 	}
 	return false, false
 }
 
 // checkFn is the type of the checking function used in CheckFn()
-type checkFn func(a, b Object) (differ bool, noHash bool)
+type checkFn func(a, b fs.Object) (differ bool, noHash bool)
 
 // checkMarch is used to march over two Fses in the same way as
 // sync/copy
 type checkMarch struct {
-	fdst, fsrc      Fs
+	fdst, fsrc      fs.Fs
 	check           checkFn
 	differences     int32
 	noHashes        int32
@@ -775,15 +560,15 @@ type checkMarch struct {
 }
 
 // DstOnly is called for an object which is in the destination only
-func (c *checkMarch) DstOnly(dst DirEntry) (recurse bool) {
+func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
 	switch dst.(type) {
-	case Object:
+	case fs.Object:
 		err := errors.Errorf("File not in %v", c.fsrc)
-		Errorf(dst, "%v", err)
-		Stats.Error(err)
+		fs.Errorf(dst, "%v", err)
+		fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.srcFilesMissing, 1)
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
 		return true
 	default:
@@ -793,15 +578,15 @@ func (c *checkMarch) DstOnly(dst DirEntry) (recurse bool) {
 }
 
 // SrcOnly is called for an object which is in the source only
-func (c *checkMarch) SrcOnly(src DirEntry) (recurse bool) {
+func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
 	switch src.(type) {
-	case Object:
+	case fs.Object:
 		err := errors.Errorf("File not in %v", c.fdst)
-		Errorf(src, "%v", err)
-		Stats.Error(err)
+		fs.Errorf(src, "%v", err)
+		fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.dstFilesMissing, 1)
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
 		return true
 	default:
@@ -811,52 +596,52 @@ func (c *checkMarch) SrcOnly(src DirEntry) (recurse bool) {
 }
 
 // check to see if two objects are identical using the check function
-func (c *checkMarch) checkIdentical(dst, src Object) (differ bool, noHash bool) {
-	Stats.Checking(src.Remote())
-	defer Stats.DoneChecking(src.Remote())
-	if !Config.IgnoreSize && src.Size() != dst.Size() {
+func (c *checkMarch) checkIdentical(dst, src fs.Object) (differ bool, noHash bool) {
+	accounting.Stats.Checking(src.Remote())
+	defer accounting.Stats.DoneChecking(src.Remote())
+	if !fs.Config.IgnoreSize && src.Size() != dst.Size() {
 		err := errors.Errorf("Sizes differ")
-		Errorf(src, "%v", err)
-		Stats.Error(err)
+		fs.Errorf(src, "%v", err)
+		fs.CountError(err)
 		return true, false
 	}
-	if Config.SizeOnly {
+	if fs.Config.SizeOnly {
 		return false, false
 	}
 	return c.check(dst, src)
 }
 
 // Match is called when src and dst are present, so sync src to dst
-func (c *checkMarch) Match(dst, src DirEntry) (recurse bool) {
+func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) {
 	switch srcX := src.(type) {
-	case Object:
-		dstX, ok := dst.(Object)
+	case fs.Object:
+		dstX, ok := dst.(fs.Object)
 		if ok {
 			differ, noHash := c.checkIdentical(dstX, srcX)
 			if differ {
 				atomic.AddInt32(&c.differences, 1)
 			} else {
-				Debugf(dstX, "OK")
+				fs.Debugf(dstX, "OK")
 			}
 			if noHash {
 				atomic.AddInt32(&c.noHashes, 1)
 			}
 		} else {
 			err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
-			Errorf(src, "%v", err)
-			Stats.Error(err)
+			fs.Errorf(src, "%v", err)
+			fs.CountError(err)
 			atomic.AddInt32(&c.differences, 1)
 			atomic.AddInt32(&c.dstFilesMissing, 1)
 		}
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
-		_, ok := dst.(Directory)
+		_, ok := dst.(fs.Directory)
 		if ok {
 			return true
 		}
 		err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
-		Errorf(dst, "%v", err)
-		Stats.Error(err)
+		fs.Errorf(dst, "%v", err)
+		fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.srcFilesMissing, 1)
 
@@ -873,7 +658,7 @@ func (c *checkMarch) Match(dst, src DirEntry) (recurse bool) {
 //
 // It returns an error if any differences were found, and logs the
 // number of files which could not be hashed.
-func CheckFn(fdst, fsrc Fs, check checkFn) error {
+func CheckFn(fdst, fsrc fs.Fs, check checkFn) error {
 	c := &checkMarch{
 		fdst:  fdst,
 		fsrc:  fsrc,
@@ -881,20 +666,20 @@ func CheckFn(fdst, fsrc Fs, check checkFn) error {
 	}
 
 	// set up a march over fdst and fsrc
-	m := newMarch(context.Background(), fdst, fsrc, "", c)
-	Infof(fdst, "Waiting for checks to finish")
-	m.run()
+	m := march.New(context.Background(), fdst, fsrc, "", c)
+	fs.Infof(fdst, "Waiting for checks to finish")
+	m.Run()
 
 	if c.dstFilesMissing > 0 {
-		Logf(fdst, "%d files missing", c.dstFilesMissing)
+		fs.Logf(fdst, "%d files missing", c.dstFilesMissing)
 	}
 	if c.srcFilesMissing > 0 {
-		Logf(fsrc, "%d files missing", c.srcFilesMissing)
+		fs.Logf(fsrc, "%d files missing", c.srcFilesMissing)
 	}
 
-	Logf(fdst, "%d differences found", Stats.GetErrors())
+	fs.Logf(fdst, "%d differences found", accounting.Stats.GetErrors())
 	if c.noHashes > 0 {
-		Logf(fdst, "%d hashes could not be checked", c.noHashes)
+		fs.Logf(fdst, "%d hashes could not be checked", c.noHashes)
 	}
 	if c.differences > 0 {
 		return errors.Errorf("%d differences found", c.differences)
@@ -903,25 +688,10 @@ func CheckFn(fdst, fsrc Fs, check checkFn) error {
 }
 
 // Check checks the files in fsrc and fdst according to Size and hash
-func Check(fdst, fsrc Fs) error {
+func Check(fdst, fsrc fs.Fs) error {
 	return CheckFn(fdst, fsrc, checkIdentical)
 }
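
CheckFn is the extension point here: Check binds the hash comparison above, and CheckDownload (below) binds a byte-for-byte read. Because a func literal is assignable to the unexported checkFn type, a caller could sketch a custom policy like this (illustrative only):

```go
err := operations.CheckFn(fdst, fsrc, func(a, b fs.Object) (differ, noHash bool) {
	// Hypothetical policy: flag pairs whose modification times disagree.
	// Sizes have already been compared by checkMarch.checkIdentical.
	return !a.ModTime().Equal(b.ModTime()), false
})
```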
 
-// ReadFill reads as much data from r into buf as it can
-//
-// It reads until the buffer is full or r.Read returned an error.
-//
-// This is io.ReadFull but when you just want as much data as
-// possible, not an exact size of block.
-func ReadFill(r io.Reader, buf []byte) (n int, err error) {
-	var nn int
-	for n < len(buf) && err == nil {
-		nn, err = r.Read(buf[n:])
-		n += nn
-	}
-	return n, err
-}
-
 // CheckEqualReaders checks to see if in1 and in2 have the same
 // content when read.
 //
@@ -931,8 +701,8 @@ func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
 	buf1 := make([]byte, bufSize)
 	buf2 := make([]byte, bufSize)
 	for {
-		n1, err1 := ReadFill(in1, buf1)
-		n2, err2 := ReadFill(in2, buf2)
+		n1, err1 := readers.ReadFill(in1, buf1)
+		n2, err2 := readers.ReadFill(in2, buf2)
 		// check errors
 		if err1 != nil && err1 != io.EOF {
 			return true, err1
@@ -956,32 +726,32 @@ func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
 // reading all their bytes if necessary.
 //
 // it returns true if differences were found
-func CheckIdentical(dst, src Object) (differ bool, err error) {
+func CheckIdentical(dst, src fs.Object) (differ bool, err error) {
 	in1, err := dst.Open()
 	if err != nil {
 		return true, errors.Wrapf(err, "failed to open %q", dst)
 	}
-	in1 = NewAccount(in1, dst).WithBuffer() // account and buffer the transfer
-	defer CheckClose(in1, &err)
+	in1 = accounting.NewAccount(in1, dst).WithBuffer() // account and buffer the transfer
+	defer fs.CheckClose(in1, &err)
 
 	in2, err := src.Open()
 	if err != nil {
 		return true, errors.Wrapf(err, "failed to open %q", src)
 	}
-	in2 = NewAccount(in2, src).WithBuffer() // account and buffer the transfer
-	defer CheckClose(in2, &err)
+	in2 = accounting.NewAccount(in2, src).WithBuffer() // account and buffer the transfer
+	defer fs.CheckClose(in2, &err)
 
 	return CheckEqualReaders(in1, in2)
 }
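
ReadFill now lives in the readers library package (the lib/readers import path is assumed here). Unlike io.ReadFull it returns a short count with io.EOF rather than an error, which is what lets CheckEqualReaders compare two streams chunk by chunk without treating a short final block as corruption. A minimal sketch of the contract:

```go
buf := make([]byte, 8)
n, err := readers.ReadFill(strings.NewReader("hello"), buf)
// n == 5, err == io.EOF: as many bytes as were available,
// not an exact block size.
```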
 
 // CheckDownload checks the files in fsrc and fdst according to Size
 // and the actual contents of the files.
-func CheckDownload(fdst, fsrc Fs) error {
-	check := func(a, b Object) (differ bool, noHash bool) {
+func CheckDownload(fdst, fsrc fs.Fs) error {
+	check := func(a, b fs.Object) (differ bool, noHash bool) {
 		differ, err := CheckIdentical(a, b)
 		if err != nil {
-			Stats.Error(err)
-			Errorf(a, "Failed to download: %v", err)
+			fs.CountError(err)
+			fs.Errorf(a, "Failed to download: %v", err)
 			return true, true
 		}
 		return differ, false
@@ -992,8 +762,8 @@ func CheckDownload(fdst, fsrc Fs) error {
 // ListFn lists the Fs to the supplied function
 //
 // Lists in parallel which may get them out of order
-func ListFn(f Fs, fn func(Object)) error {
-	return Walk(f, "", false, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+func ListFn(f fs.Fs, fn func(fs.Object)) error {
+	return walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			// FIXME count errors and carry on for listing
 			return err
@@ -1020,8 +790,8 @@ func syncFprintf(w io.Writer, format string, a ...interface{}) {
 // Shows size and path - obeys includes and excludes
 //
 // Lists in parallel which may get them out of order
-func List(f Fs, w io.Writer) error {
-	return ListFn(f, func(o Object) {
+func List(f fs.Fs, w io.Writer) error {
+	return ListFn(f, func(o fs.Object) {
 		syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
 	})
 }
@@ -1031,11 +801,11 @@ func List(f Fs, w io.Writer) error {
 // Shows size, mod time and path - obeys includes and excludes
 //
 // Lists in parallel which may get them out of order
-func ListLong(f Fs, w io.Writer) error {
-	return ListFn(f, func(o Object) {
-		Stats.Checking(o.Remote())
+func ListLong(f fs.Fs, w io.Writer) error {
+	return ListFn(f, func(o fs.Object) {
+		accounting.Stats.Checking(o.Remote())
 		modTime := o.ModTime()
-		Stats.DoneChecking(o.Remote())
+		accounting.Stats.DoneChecking(o.Remote())
 		syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
 	})
 }
@@ -1046,8 +816,8 @@ func ListLong(f Fs, w io.Writer) error {
 // excludes
 //
 // Lists in parallel which may get them out of order
-func Md5sum(f Fs, w io.Writer) error {
-	return hashLister(HashMD5, f, w)
+func Md5sum(f fs.Fs, w io.Writer) error {
+	return hashLister(hash.HashMD5, f, w)
 }
 
 // Sha1sum list the Fs to the supplied writer
@@ -1055,8 +825,8 @@ func Md5sum(f Fs, w io.Writer) error {
 // Obeys includes and excludes
 //
 // Lists in parallel which may get them out of order
-func Sha1sum(f Fs, w io.Writer) error {
-	return hashLister(HashSHA1, f, w)
+func Sha1sum(f fs.Fs, w io.Writer) error {
+	return hashLister(hash.HashSHA1, f, w)
 }
 
 // DropboxHashSum list the Fs to the supplied writer
@@ -1064,37 +834,37 @@ func Sha1sum(f Fs, w io.Writer) error {
 // Obeys includes and excludes
 //
 // Lists in parallel which may get them out of order
-func DropboxHashSum(f Fs, w io.Writer) error {
-	return hashLister(HashDropbox, f, w)
+func DropboxHashSum(f fs.Fs, w io.Writer) error {
+	return hashLister(hash.HashDropbox, f, w)
 }
 
 // hashSum returns the human-readable hash for ht passed in.  This may
 // be UNSUPPORTED or ERROR.
-func hashSum(ht HashType, o Object) string {
-	Stats.Checking(o.Remote())
+func hashSum(ht hash.Type, o fs.Object) string {
+	accounting.Stats.Checking(o.Remote())
 	sum, err := o.Hash(ht)
-	Stats.DoneChecking(o.Remote())
-	if err == ErrHashUnsupported {
+	accounting.Stats.DoneChecking(o.Remote())
+	if err == hash.ErrHashUnsupported {
 		sum = "UNSUPPORTED"
 	} else if err != nil {
-		Debugf(o, "Failed to read %v: %v", ht, err)
+		fs.Debugf(o, "Failed to read %v: %v", ht, err)
 		sum = "ERROR"
 	}
 	return sum
 }
 
-func hashLister(ht HashType, f Fs, w io.Writer) error {
-	return ListFn(f, func(o Object) {
+func hashLister(ht hash.Type, f fs.Fs, w io.Writer) error {
+	return ListFn(f, func(o fs.Object) {
 		sum := hashSum(ht, o)
-		syncFprintf(w, "%*s  %s\n", HashWidth[ht], sum, o.Remote())
+		syncFprintf(w, "%*s  %s\n", hash.HashWidth[ht], sum, o.Remote())
 	})
 }
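
The three listers above are thin wrappers that bind a hash.Type to hashLister; the field width comes from hash.HashWidth so the sums stay column-aligned. Post-refactor usage, sketched:

```go
var buf bytes.Buffer
if err := operations.Md5sum(f, &buf); err != nil {
	log.Fatal(err)
}
fmt.Print(buf.String()) // one "<md5sum>  <remote path>" line per object
```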
 
 // Count counts the objects and their sizes in the Fs
 //
 // Obeys includes and excludes
-func Count(f Fs) (objects int64, size int64, err error) {
-	err = ListFn(f, func(o Object) {
+func Count(f fs.Fs) (objects int64, size int64, err error) {
+	err = ListFn(f, func(o fs.Object) {
 		atomic.AddInt64(&objects, 1)
 		atomic.AddInt64(&size, o.Size())
 	})
@@ -1103,7 +873,7 @@ func Count(f Fs) (objects int64, size int64, err error) {
 
 // ConfigMaxDepth returns the depth to use for a recursive or non-recursive listing.
 func ConfigMaxDepth(recursive bool) int {
-	depth := Config.MaxDepth
+	depth := fs.Config.MaxDepth
 	if !recursive && depth < 0 {
 		depth = 1
 	}
@@ -1111,13 +881,13 @@ func ConfigMaxDepth(recursive bool) int {
 }
 
 // ListDir lists the directories/buckets/containers in the Fs to the supplied writer
-func ListDir(f Fs, w io.Writer) error {
-	return Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries DirEntries, err error) error {
+func ListDir(f fs.Fs, w io.Writer) error {
+	return walk.Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			// FIXME count errors and carry on for listing
 			return err
 		}
-		entries.ForDir(func(dir Directory) {
+		entries.ForDir(func(dir fs.Directory) {
 			if dir != nil {
 				syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
 			}
@@ -1126,24 +896,16 @@ func ListDir(f Fs, w io.Writer) error {
 	})
 }
 
-// logDirName returns an object for the logger
-func logDirName(f Fs, dir string) interface{} {
-	if dir != "" {
-		return dir
-	}
-	return f
-}
-
 // Mkdir makes a destination directory or container
-func Mkdir(f Fs, dir string) error {
-	if Config.DryRun {
-		Logf(logDirName(f, dir), "Not making directory as dry run is set")
+func Mkdir(f fs.Fs, dir string) error {
+	if fs.Config.DryRun {
+		fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set")
 		return nil
 	}
-	Debugf(logDirName(f, dir), "Making directory")
+	fs.Debugf(fs.LogDirName(f, dir), "Making directory")
 	err := f.Mkdir(dir)
 	if err != nil {
-		Stats.Error(err)
+		fs.CountError(err)
 		return err
 	}
 	return nil
@@ -1151,36 +913,36 @@ func Mkdir(f Fs, dir string) error {
 
 // TryRmdir removes a container, failing if it is not empty.  It
 // doesn't count errors but may return one.
-func TryRmdir(f Fs, dir string) error {
-	if Config.DryRun {
-		Logf(logDirName(f, dir), "Not deleting as dry run is set")
+func TryRmdir(f fs.Fs, dir string) error {
+	if fs.Config.DryRun {
+		fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set")
 		return nil
 	}
-	Debugf(logDirName(f, dir), "Removing directory")
+	fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
 	return f.Rmdir(dir)
 }
 
 // Rmdir removes a container, failing if it is not empty
-func Rmdir(f Fs, dir string) error {
+func Rmdir(f fs.Fs, dir string) error {
 	err := TryRmdir(f, dir)
 	if err != nil {
-		Stats.Error(err)
+		fs.CountError(err)
 		return err
 	}
 	return err
 }
 
 // Purge removes a container and all of its contents
-func Purge(f Fs) error {
+func Purge(f fs.Fs) error {
 	doFallbackPurge := true
 	var err error
 	if doPurge := f.Features().Purge; doPurge != nil {
 		doFallbackPurge = false
-		if Config.DryRun {
-			Logf(f, "Not purging as --dry-run set")
+		if fs.Config.DryRun {
+			fs.Logf(f, "Not purging as --dry-run set")
 		} else {
 			err = doPurge()
-			if err == ErrorCantPurge {
+			if err == fs.ErrorCantPurge {
 				doFallbackPurge = true
 			}
 		}
@@ -1194,7 +956,7 @@ func Purge(f Fs) error {
 		err = Rmdirs(f, "", false)
 	}
 	if err != nil {
-		Stats.Error(err)
+		fs.CountError(err)
 		return err
 	}
 	return nil
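
Purge is built on rclone's optional-feature idiom: Features() returns a struct of nilable function pointers, so probing a capability is a nil check. Stripped to a skeleton (a sketch, not the full fallback logic above):

```go
if doPurge := f.Features().Purge; doPurge != nil {
	err = doPurge() // one server-side call where the backend supports it
} else {
	err = operations.Delete(f) // otherwise delete the objects one by one...
	// ...then remove the now-empty directories via Rmdirs, as above.
}
```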
@@ -1202,13 +964,13 @@ func Purge(f Fs) error {
 
 // Delete removes all the contents of a container.  Unlike Purge, it
 // obeys includes and excludes.
-func Delete(f Fs) error {
-	delete := make(ObjectsChan, Config.Transfers)
+func Delete(f fs.Fs) error {
+	delete := make(fs.ObjectsChan, fs.Config.Transfers)
 	delErr := make(chan error, 1)
 	go func() {
 		delErr <- DeleteFiles(delete)
 	}()
-	err := ListFn(f, func(o Object) {
+	err := ListFn(f, func(o fs.Object) {
 		delete <- o
 	})
 	close(delete)
@@ -1220,7 +982,7 @@ func Delete(f Fs) error {
 }
 
 // dedupeRename renames the objs slice to different names
-func dedupeRename(remote string, objs []Object) {
+func dedupeRename(remote string, objs []fs.Object) {
 	f := objs[0].Fs()
 	doMove := f.Features().Move
 	if doMove == nil {
@@ -1230,37 +992,37 @@ func dedupeRename(remote string, objs []Object) {
 	base := remote[:len(remote)-len(ext)]
 	for i, o := range objs {
 		newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
-		if !Config.DryRun {
+		if !fs.Config.DryRun {
 			newObj, err := doMove(o, newName)
 			if err != nil {
-				Stats.Error(err)
-				Errorf(o, "Failed to rename: %v", err)
+				fs.CountError(err)
+				fs.Errorf(o, "Failed to rename: %v", err)
 				continue
 			}
-			Infof(newObj, "renamed from: %v", o)
+			fs.Infof(newObj, "renamed from: %v", o)
 		} else {
-			Logf(remote, "Not renaming to %q as --dry-run", newName)
+			fs.Logf(remote, "Not renaming to %q as --dry-run", newName)
 		}
 	}
 }
 
 // dedupeDeleteAllButOne deletes all but the one in keep
-func dedupeDeleteAllButOne(keep int, remote string, objs []Object) {
+func dedupeDeleteAllButOne(keep int, remote string, objs []fs.Object) {
 	for i, o := range objs {
 		if i == keep {
 			continue
 		}
 		_ = DeleteFile(o)
 	}
-	Logf(remote, "Deleted %d extra copies", len(objs)-1)
+	fs.Logf(remote, "Deleted %d extra copies", len(objs)-1)
 }
 
 // dedupeDeleteIdentical deletes all but one of identical (by hash) copies
-func dedupeDeleteIdentical(remote string, objs []Object) []Object {
+func dedupeDeleteIdentical(remote string, objs []fs.Object) []fs.Object {
 	// See how many of these duplicates are identical
-	byHash := make(map[string][]Object, len(objs))
+	byHash := make(map[string][]fs.Object, len(objs))
 	for _, o := range objs {
-		md5sum, err := o.Hash(HashMD5)
+		md5sum, err := o.Hash(hash.HashMD5)
 		if err == nil {
 			byHash[md5sum] = append(byHash[md5sum], o)
 		}
@@ -1270,7 +1032,7 @@ func dedupeDeleteIdentical(remote string, objs []Object) []Object {
 	objs = nil
 	for md5sum, hashObjs := range byHash {
 		if len(hashObjs) > 1 {
-			Logf(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
+			fs.Logf(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
 			for _, o := range hashObjs[1:] {
 				_ = DeleteFile(o)
 			}
@@ -1282,26 +1044,26 @@ func dedupeDeleteIdentical(remote string, objs []Object) []Object {
 }
 
 // dedupeInteractive interactively dedupes the slice of objects
-func dedupeInteractive(remote string, objs []Object) {
+func dedupeInteractive(remote string, objs []fs.Object) {
 	fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
 	for i, o := range objs {
-		md5sum, err := o.Hash(HashMD5)
+		md5sum, err := o.Hash(hash.HashMD5)
 		if err != nil {
 			md5sum = err.Error()
 		}
 		fmt.Printf("  %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum)
 	}
-	switch Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
+	switch config.Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
 	case 's':
 	case 'k':
-		keep := ChooseNumber("Enter the number of the file to keep", 1, len(objs))
+		keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
 		dedupeDeleteAllButOne(keep-1, remote, objs)
 	case 'r':
 		dedupeRename(remote, objs)
 	}
 }
 
-type objectsSortedByModTime []Object
+type objectsSortedByModTime []fs.Object
 
 func (objs objectsSortedByModTime) Len() int      { return len(objs) }
 func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
@@ -1370,14 +1132,14 @@ func (x *DeduplicateMode) Type() string {
 var _ pflag.Value = (*DeduplicateMode)(nil)
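
The blank-identifier line above is the usual compile-time proof that *DeduplicateMode satisfies pflag.Value (the String/Set/Type methods in the elided hunk). The same idiom works for any custom flag type; a self-contained sketch with a made-up type:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/pflag"
)

type celsius float64

func (c *celsius) String() string { return strconv.FormatFloat(float64(*c), 'g', -1, 64) }
func (c *celsius) Set(s string) error {
	f, err := strconv.ParseFloat(s, 64)
	*c = celsius(f)
	return err
}
func (c *celsius) Type() string { return "celsius" }

// The build fails here, not at some later call site, if a method goes missing.
var _ pflag.Value = (*celsius)(nil)

func main() {
	var temp celsius
	pflag.Var(&temp, "temp", "target temperature")
	pflag.Parse()
	fmt.Println(temp)
}
```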
 
 // dedupeFindDuplicateDirs scans f for duplicate directories
-func dedupeFindDuplicateDirs(f Fs) ([][]Directory, error) {
-	duplicateDirs := [][]Directory{}
-	err := Walk(f, "", true, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) {
+	duplicateDirs := [][]fs.Directory{}
+	err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			return err
 		}
-		dirs := map[string][]Directory{}
-		entries.ForDir(func(d Directory) {
+		dirs := map[string][]fs.Directory{}
+		entries.ForDir(func(d fs.Directory) {
 			dirs[d.Remote()] = append(dirs[d.Remote()], d)
 		})
 		for _, ds := range dirs {
@@ -1394,7 +1156,7 @@ func dedupeFindDuplicateDirs(f Fs) ([][]Directory, error) {
 }
 
 // dedupeMergeDuplicateDirs merges all the duplicate directories found
-func dedupeMergeDuplicateDirs(f Fs, duplicateDirs [][]Directory) error {
+func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error {
 	mergeDirs := f.Features().MergeDirs
 	if mergeDirs == nil {
 		return errors.Errorf("%v: can't merge directories", f)
@@ -1404,14 +1166,14 @@ func dedupeMergeDuplicateDirs(f Fs, duplicateDirs [][]Directory) error {
 		return errors.Errorf("%v: can't flush dir cache", f)
 	}
 	for _, dirs := range duplicateDirs {
-		if !Config.DryRun {
-			Infof(dirs[0], "Merging contents of duplicate directories")
+		if !fs.Config.DryRun {
+			fs.Infof(dirs[0], "Merging contents of duplicate directories")
 			err := mergeDirs(dirs)
 			if err != nil {
 				return errors.Wrap(err, "merge duplicate dirs")
 			}
 		} else {
-			Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
+			fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
 		}
 	}
 	dirCacheFlush()
@@ -1421,8 +1183,8 @@ func dedupeMergeDuplicateDirs(f Fs, duplicateDirs [][]Directory) error {
 // Deduplicate interactively finds duplicate files and offers to
 // delete all but one or rename them to be different. Only useful with
 // Google Drive which can have duplicate file names.
-func Deduplicate(f Fs, mode DeduplicateMode) error {
-	Infof(f, "Looking for duplicates using %v mode.", mode)
+func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
+	fs.Infof(f, "Looking for duplicates using %v mode.", mode)
 
 	// Find duplicate directories first and fix them - repeat
 	// until all fixed
@@ -1438,18 +1200,18 @@ func Deduplicate(f Fs, mode DeduplicateMode) error {
 		if err != nil {
 			return err
 		}
-		if Config.DryRun {
+		if fs.Config.DryRun {
 			break
 		}
 	}
 
 	// Now find duplicate files
-	files := map[string][]Object{}
-	err := Walk(f, "", true, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+	files := map[string][]fs.Object{}
+	err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			return err
 		}
-		entries.ForObject(func(o Object) {
+		entries.ForObject(func(o fs.Object) {
 			remote := o.Remote()
 			files[remote] = append(files[remote], o)
 		})
@@ -1460,10 +1222,10 @@ func Deduplicate(f Fs, mode DeduplicateMode) error {
 	}
 	for remote, objs := range files {
 		if len(objs) > 1 {
-			Logf(remote, "Found %d duplicates - deleting identical copies", len(objs))
+			fs.Logf(remote, "Found %d duplicates - deleting identical copies", len(objs))
 			objs = dedupeDeleteIdentical(remote, objs)
 			if len(objs) <= 1 {
-				Logf(remote, "All duplicates removed")
+				fs.Logf(remote, "All duplicates removed")
 				continue
 			}
 			switch mode {
@@ -1495,21 +1257,21 @@ func Deduplicate(f Fs, mode DeduplicateMode) error {
 // channel.
 //
 // If the error was ErrorDirNotFound then it will be ignored
-func listToChan(f Fs) ObjectsChan {
-	o := make(ObjectsChan, Config.Checkers)
+func listToChan(f fs.Fs) fs.ObjectsChan {
+	o := make(fs.ObjectsChan, fs.Config.Checkers)
 	go func() {
 		defer close(o)
-		_ = Walk(f, "", true, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+		_ = walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 			if err != nil {
-				if err == ErrorDirNotFound {
+				if err == fs.ErrorDirNotFound {
 					return nil
 				}
 				err = errors.Errorf("Failed to list: %v", err)
-				Stats.Error(err)
-				Errorf(nil, "%v", err)
+				fs.CountError(err)
+				fs.Errorf(nil, "%v", err)
 				return nil
 			}
-			entries.ForObject(func(obj Object) {
+			entries.ForObject(func(obj fs.Object) {
 				o <- obj
 			})
 			return nil
@@ -1519,13 +1281,13 @@ func listToChan(f Fs) ObjectsChan {
 }
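
listToChan turns the callback-style walk into a channel: the goroutine closes the channel when the walk finishes, and a missing directory is treated as simply empty. Since it is unexported, consumers are package-internal and just range (illustrative):

```go
for o := range listToChan(f) {
	fs.Debugf(o, "queued for processing")
}
// The loop ends once the walk completes and the goroutine closes the channel.
```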
 
 // CleanUp removes the trash for the Fs
-func CleanUp(f Fs) error {
+func CleanUp(f fs.Fs) error {
 	doCleanUp := f.Features().CleanUp
 	if doCleanUp == nil {
 		return errors.Errorf("%v doesn't support cleanup", f)
 	}
-	if Config.DryRun {
-		Logf(f, "Not running cleanup as --dry-run set")
+	if fs.Config.DryRun {
+		fs.Logf(f, "Not running cleanup as --dry-run set")
 		return nil
 	}
 	return doCleanUp()
@@ -1545,13 +1307,13 @@ type readCloser struct {
 //
 // if count < 0 then it will be ignored
 // if count >= 0 then only that many bytes will be output
-func Cat(f Fs, w io.Writer, offset, count int64) error {
+func Cat(f fs.Fs, w io.Writer, offset, count int64) error {
 	var mu sync.Mutex
-	return ListFn(f, func(o Object) {
+	return ListFn(f, func(o fs.Object) {
 		var err error
-		Stats.Transferring(o.Remote())
+		accounting.Stats.Transferring(o.Remote())
 		defer func() {
-			Stats.DoneTransferring(o.Remote(), err == nil)
+			accounting.Stats.DoneTransferring(o.Remote(), err == nil)
 		}()
 		size := o.Size()
 		thisOffset := offset
@@ -1560,14 +1322,14 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
 		}
 		// size remaining is now reduced by thisOffset
 		size -= thisOffset
-		var options []OpenOption
+		var options []fs.OpenOption
 		if thisOffset > 0 {
-			options = append(options, &SeekOption{Offset: thisOffset})
+			options = append(options, &fs.SeekOption{Offset: thisOffset})
 		}
 		in, err := o.Open(options...)
 		if err != nil {
-			Stats.Error(err)
-			Errorf(o, "Failed to open: %v", err)
+			fs.CountError(err)
+			fs.Errorf(o, "Failed to open: %v", err)
 			return
 		}
 		if count >= 0 {
@@ -1577,12 +1339,12 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
 				size = count
 			}
 		}
-		in = NewAccountSizeName(in, size, o.Remote()).WithBuffer() // account and buffer the transfer
+		in = accounting.NewAccountSizeName(in, size, o.Remote()).WithBuffer() // account and buffer the transfer
 		defer func() {
 			err = in.Close()
 			if err != nil {
-				Stats.Error(err)
-				Errorf(o, "Failed to close: %v", err)
+				fs.CountError(err)
+				fs.Errorf(o, "Failed to close: %v", err)
 			}
 		}()
 		// take the lock just before we output stuff, so at the last possible moment
@@ -1590,47 +1352,47 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
 		defer mu.Unlock()
 		_, err = io.Copy(w, in)
 		if err != nil {
-			Stats.Error(err)
-			Errorf(o, "Failed to send to output: %v", err)
+			fs.CountError(err)
+			fs.Errorf(o, "Failed to send to output: %v", err)
 		}
 	})
 }
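
A hedged usage sketch: the offset becomes an fs.SeekOption on Open and count caps the bytes copied, making this the engine behind the cat command's offset/count flags:

```go
var buf bytes.Buffer
// Copy bytes 10..29 of every object under f into buf (illustrative).
if err := operations.Cat(f, &buf, 10, 20); err != nil {
	log.Fatal(err)
}
fmt.Print(buf.String())
```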
 
 // Rcat reads data from the Reader until EOF and uploads it to a file on remote
-func Rcat(fdst Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst Object, err error) {
-	Stats.Transferring(dstFileName)
-	in = NewAccountSizeName(in, -1, dstFileName).WithBuffer()
+func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
+	accounting.Stats.Transferring(dstFileName)
+	in = accounting.NewAccountSizeName(in, -1, dstFileName).WithBuffer()
 	defer func() {
-		Stats.DoneTransferring(dstFileName, err == nil)
+		accounting.Stats.DoneTransferring(dstFileName, err == nil)
 		if otherErr := in.Close(); otherErr != nil {
-			Debugf(fdst, "Rcat: failed to close source: %v", err)
+			fs.Debugf(fdst, "Rcat: failed to close source: %v", err)
 		}
 	}()
 
-	hashOption := &HashesOption{Hashes: fdst.Hashes()}
-	hash, err := NewMultiHasherTypes(fdst.Hashes())
+	hashOption := &fs.HashesOption{Hashes: fdst.Hashes()}
+	hash, err := hash.NewMultiHasherTypes(fdst.Hashes())
 	if err != nil {
 		return nil, err
 	}
-	readCounter := NewCountingReader(in)
+	readCounter := readers.NewCountingReader(in)
 	trackingIn := io.TeeReader(readCounter, hash)
 
-	compare := func(dst Object) error {
-		src := NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
+	compare := func(dst fs.Object) error {
+		src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
 		if !Equal(src, dst) {
 			err = errors.Errorf("corrupted on transfer")
-			Stats.Error(err)
-			Errorf(dst, "%v", err)
+			fs.CountError(err)
+			fs.Errorf(dst, "%v", err)
 			return err
 		}
 		return nil
 	}
 
 	// check if the file is small enough for direct upload
-	buf := make([]byte, Config.StreamingUploadCutoff)
+	buf := make([]byte, fs.Config.StreamingUploadCutoff)
 	if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
-		Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
-		src := NewMemoryObject(dstFileName, modTime, buf[:n])
+		fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
+		src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
 		return Copy(fdst, nil, dstFileName, src)
 	}
 
@@ -1643,28 +1405,28 @@ func Rcat(fdst Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst
 	fStreamTo := fdst
 	canStream := fdst.Features().PutStream != nil
 	if !canStream {
-		Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
-		tmpLocalFs, err := temporaryLocalFs()
+		fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
+		tmpLocalFs, err := fs.TemporaryLocalFs()
 		if err != nil {
 			return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
 		}
 		defer func() {
 			err := Purge(tmpLocalFs)
 			if err != nil {
-				Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
+				fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
 			}
 		}()
 		fStreamTo = tmpLocalFs
 	}
 
-	if Config.DryRun {
-		Logf("stdin", "Not uploading as --dry-run")
+	if fs.Config.DryRun {
+		fs.Logf("stdin", "Not uploading as --dry-run")
 		// prevents "broken pipe" errors
 		_, err = io.Copy(ioutil.Discard, in)
 		return nil, err
 	}
 
-	objInfo := NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
+	objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
 	if dst, err = fStreamTo.Features().PutStream(in, objInfo, hashOption); err != nil {
 		return dst, err
 	}
@@ -1680,25 +1442,25 @@ func Rcat(fdst Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst
 
 // Rmdirs removes any empty directories (or directories only
 // containing empty directories) under f, including f.
-func Rmdirs(f Fs, dir string, leaveRoot bool) error {
+func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error {
 	dirEmpty := make(map[string]bool)
 	dirEmpty[""] = !leaveRoot
-	err := Walk(f, dir, true, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
+	err := walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
-			Stats.Error(err)
-			Errorf(f, "Failed to list %q: %v", dirPath, err)
+			fs.CountError(err)
+			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
 			return nil
 		}
 		for _, entry := range entries {
 			switch x := entry.(type) {
-			case Directory:
+			case fs.Directory:
 				// add a new directory as empty
 				dir := x.Remote()
 				_, found := dirEmpty[dir]
 				if !found {
 					dirEmpty[dir] = true
 				}
-			case Object:
+			case fs.Object:
 				// mark the parents of the file as being non-empty
 				dir := x.Remote()
 				for dir != "" {
@@ -1732,20 +1494,76 @@ func Rmdirs(f Fs, dir string, leaveRoot bool) error {
 		dir := toDelete[i]
 		err := TryRmdir(f, dir)
 		if err != nil {
-			Stats.Error(err)
-			Errorf(dir, "Failed to rmdir: %v", err)
+			fs.CountError(err)
+			fs.Errorf(dir, "Failed to rmdir: %v", err)
 			return err
 		}
 	}
 	return nil
 }
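
The walk marks every directory it visits as tentatively empty, each file then flips its ancestors to non-empty, and the survivors are removed leaf-first by the reverse-sorted pass above. A typical call, sketched:

```go
// Prune all empty directories under f, but keep f's root itself.
if err := operations.Rmdirs(f, "", true); err != nil {
	log.Fatal(err)
}
```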
 
+// NeedTransfer checks to see if src needs to be copied to dst using
+// the current config.
+//
+// Returns a flag which indicates whether the file needs to be
+// transferred or not.
+func NeedTransfer(dst, src fs.Object) bool {
+	if dst == nil {
+		fs.Debugf(src, "Couldn't find file - need to transfer")
+		return true
+	}
+	// If we should ignore existing files, don't transfer
+	if fs.Config.IgnoreExisting {
+		fs.Debugf(src, "Destination exists, skipping")
+		return false
+	}
+	// If we should upload unconditionally
+	if fs.Config.IgnoreTimes {
+		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
+		return true
+	}
+	// If UpdateOlder is in effect, skip if dst is newer than src
+	if fs.Config.UpdateOlder {
+		srcModTime := src.ModTime()
+		dstModTime := dst.ModTime()
+		dt := dstModTime.Sub(srcModTime)
+		// If we have a mutually agreed precision then use that
+		modifyWindow := fs.Config.ModifyWindow
+		if modifyWindow == fs.ModTimeNotSupported {
+			// Otherwise use 1 second as a safe default as
+			// the resolution of the time a file was
+			// uploaded.
+			modifyWindow = time.Second
+		}
+		switch {
+		case dt >= modifyWindow:
+			fs.Debugf(src, "Destination is newer than source, skipping")
+			return false
+		case dt <= -modifyWindow:
+			fs.Debugf(src, "Destination is older than source, transferring")
+		default:
+			if src.Size() == dst.Size() {
+				fs.Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
+				return false
+			}
+			fs.Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
+		}
+	} else {
+		// Check to see if changed or not
+		if Equal(src, dst) {
+			fs.Debugf(src, "Unchanged skipping")
+			return false
+		}
+	}
+	return true
+}
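
NeedTransfer centralises the skip/transfer decision: missing destination, --ignore-existing, --ignore-times, then --update against the modify window, and finally the full Equal check. A sketch of using it as a guard around the Copy primitive (illustrative; srcObj and dstObj are assumed already resolved, dstObj possibly nil):

```go
if operations.NeedTransfer(dstObj, srcObj) {
	// Copy overwrites dstObj (which may be nil) with srcObj's data.
	if _, err := operations.Copy(fdst, dstObj, srcObj.Remote(), srcObj); err != nil {
		log.Fatal(err)
	}
}
```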
+
 // moveOrCopyFile moves or copies a single file possibly to a new name
-func moveOrCopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string, cp bool) (err error) {
+func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
 	dstFilePath := path.Join(fdst.Root(), dstFileName)
 	srcFilePath := path.Join(fsrc.Root(), srcFileName)
 	if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
-		Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
+		fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
 		return nil
 	}
 
@@ -1763,33 +1581,33 @@ func moveOrCopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string, cp
 
 	// Find dst object if it exists
 	dstObj, err := fdst.NewObject(dstFileName)
-	if err == ErrorObjectNotFound {
+	if err == fs.ErrorObjectNotFound {
 		dstObj = nil
 	} else if err != nil {
 		return err
 	}
 
 	if NeedTransfer(dstObj, srcObj) {
-		Stats.Transferring(srcFileName)
+		accounting.Stats.Transferring(srcFileName)
 		_, err = Op(fdst, dstObj, dstFileName, srcObj)
-		Stats.DoneTransferring(srcFileName, err == nil)
+		accounting.Stats.DoneTransferring(srcFileName, err == nil)
 	} else {
-		Stats.Checking(srcFileName)
+		accounting.Stats.Checking(srcFileName)
 		if !cp {
 			err = DeleteFile(srcObj)
 		}
-		defer Stats.DoneChecking(srcFileName)
+		defer accounting.Stats.DoneChecking(srcFileName)
 	}
 	return err
 }
 
 // MoveFile moves a single file possibly to a new name
-func MoveFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
+func MoveFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
 	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, false)
 }
 
 // CopyFile copies a single file possibly to a new name
-func CopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
+func CopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
 	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true)
 }
 
@@ -1798,7 +1616,7 @@ type ListFormat struct {
 	separator string
 	dirSlash  bool
 	output    []func() string
-	entry     DirEntry
+	entry     fs.DirEntry
 	hash      bool
 }
 
@@ -1832,7 +1650,7 @@ func (l *ListFormat) AddSize() {
 // AddPath adds path to file to output
 func (l *ListFormat) AddPath() {
 	l.AppendOutput(func() string {
-		_, isDir := l.entry.(Directory)
+		_, isDir := l.entry.(fs.Directory)
 
 		if isDir && l.dirSlash {
 			return l.entry.Remote() + "/"
@@ -1842,9 +1660,9 @@ func (l *ListFormat) AddPath() {
 }
 
 // AddHash adds the hash of the type given to the output
-func (l *ListFormat) AddHash(ht HashType) {
+func (l *ListFormat) AddHash(ht hash.Type) {
 	l.AppendOutput(func() string {
-		o, ok := l.entry.(Object)
+		o, ok := l.entry.(fs.Object)
 		if !ok {
 			return ""
 		}
@@ -1861,7 +1679,7 @@ func (l *ListFormat) AppendOutput(functionToAppend func() string) {
 }
 
 // ListFormatted prints information about specific file in specific format
-func ListFormatted(entry *DirEntry, list *ListFormat) string {
+func ListFormatted(entry *fs.DirEntry, list *ListFormat) string {
 	list.entry = *entry
 	var out string
 	for _, fun := range list.output {
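
Before the test files, a hedged sketch of driving the ListFormat builder above. SetSeparator is assumed here as the setter for the separator field shown in the struct; the Add* methods and ListFormatted are from this patch:

```go
var lf operations.ListFormat
lf.SetSeparator(" ") // assumed setter for the separator field above
lf.AddSize()
lf.AddPath()
for _, entry := range entries { // entries is an fs.DirEntries, assumed in scope
	entry := entry // ListFormatted takes a *fs.DirEntry, so copy for a stable address
	fmt.Println(operations.ListFormatted(&entry, &lf))
}
```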
diff --git a/fs/operations/operations_internal_test.go b/fs/operations/operations_internal_test.go
new file mode 100644
index 000000000..039a47788
--- /dev/null
+++ b/fs/operations/operations_internal_test.go
@@ -0,0 +1,3 @@
+// Internal tests for operations
+
+package operations
diff --git a/fs/operations_test.go b/fs/operations/operations_test.go
similarity index 73%
rename from fs/operations_test.go
rename to fs/operations/operations_test.go
index 9ad0c47e4..b71c81220 100644
--- a/fs/operations_test.go
+++ b/fs/operations/operations_test.go
@@ -14,10 +14,10 @@
 // fstest.CheckItems() before use.  This makes sure the directory
 // listing is now consistent and stops cascading errors.
 //
-// Call fs.Stats.ResetCounters() before every fs.Sync() as it uses the
-// error count internally.
+// Call accounting.Stats.ResetCounters() before every fs.Sync() as it
+// uses the error count internally.
 
-package fs_test
+package operations_test
 
 import (
 	"bytes"
@@ -32,6 +32,12 @@ import (
 
 	_ "github.com/ncw/rclone/backend/all" // import all backends
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/list"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -52,7 +58,13 @@ func TestMain(m *testing.M) {
 func TestMkdir(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fstest.TestMkdir(t, r.Fremote)
+
+	err := operations.Mkdir(r.Fremote, "")
+	require.NoError(t, err)
+	fstest.CheckListing(t, r.Fremote, []fstest.Item{})
+
+	err = operations.Mkdir(r.Fremote, "")
+	require.NoError(t, err)
 }
 
 func TestLsd(t *testing.T) {
@@ -63,7 +75,7 @@ func TestLsd(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	var buf bytes.Buffer
-	err := fs.ListDir(r.Fremote, &buf)
+	err := operations.ListDir(r.Fremote, &buf)
 	require.NoError(t, err)
 	res := buf.String()
 	assert.Contains(t, res, "sub dir\n")
@@ -78,7 +90,7 @@ func TestLs(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 
 	var buf bytes.Buffer
-	err := fs.List(r.Fremote, &buf)
+	err := operations.List(r.Fremote, &buf)
 	require.NoError(t, err)
 	res := buf.String()
 	assert.Contains(t, res, "        0 empty space\n")
@@ -94,7 +106,7 @@ func TestLsLong(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 
 	var buf bytes.Buffer
-	err := fs.ListLong(r.Fremote, &buf)
+	err := operations.ListLong(r.Fremote, &buf)
 	require.NoError(t, err)
 	res := buf.String()
 	lines := strings.Split(strings.Trim(res, "\n"), "\n")
@@ -141,7 +153,7 @@ func TestHashSums(t *testing.T) {
 	// MD5 Sum
 
 	var buf bytes.Buffer
-	err := fs.Md5sum(r.Fremote, &buf)
+	err := operations.Md5sum(r.Fremote, &buf)
 	require.NoError(t, err)
 	res := buf.String()
 	if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e  empty space\n") &&
@@ -158,7 +170,7 @@ func TestHashSums(t *testing.T) {
 	// SHA1 Sum
 
 	buf.Reset()
-	err = fs.Sha1sum(r.Fremote, &buf)
+	err = operations.Sha1sum(r.Fremote, &buf)
 	require.NoError(t, err)
 	res = buf.String()
 	if !strings.Contains(res, "da39a3ee5e6b4b0d3255bfef95601890afd80709  empty space\n") &&
@@ -175,7 +187,7 @@ func TestHashSums(t *testing.T) {
 	// Dropbox Hash Sum
 
 	buf.Reset()
-	err = fs.DropboxHashSum(r.Fremote, &buf)
+	err = operations.DropboxHashSum(r.Fremote, &buf)
 	require.NoError(t, err)
 	res = buf.String()
 	if !strings.Contains(res, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855  empty space\n") &&
@@ -203,7 +215,7 @@ func TestCount(t *testing.T) {
 	fs.Config.MaxDepth = 1
 	defer func() { fs.Config.MaxDepth = -1 }()
 
-	objects, size, err := fs.Count(r.Fremote)
+	objects, size, err := operations.Count(r.Fremote)
 	require.NoError(t, err)
 	assert.Equal(t, int64(2), objects)
 	assert.Equal(t, int64(60), size)
@@ -217,12 +229,12 @@ func TestDelete(t *testing.T) {
 	file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
 	fstest.CheckItems(t, r.Fremote, file1, file2, file3)
 
-	fs.Config.Filter.MaxSize = 60
+	filter.Active.Opt.MaxSize = 60
 	defer func() {
-		fs.Config.Filter.MaxSize = -1
+		filter.Active.Opt.MaxSize = -1
 	}()
 
-	err := fs.Delete(r.Fremote)
+	err := operations.Delete(r.Fremote)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Fremote, file3)
 }
@@ -233,9 +245,9 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs) error) {
 
 	check := func(i int, wantErrors int64) {
 		fs.Debugf(r.Fremote, "%d: Starting check test", i)
-		oldErrors := fs.Stats.GetErrors()
+		oldErrors := accounting.Stats.GetErrors()
 		err := checkFunction(r.Flocal, r.Fremote)
-		gotErrors := fs.Stats.GetErrors() - oldErrors
+		gotErrors := accounting.Stats.GetErrors() - oldErrors
 		if wantErrors == 0 && err != nil {
 			t.Errorf("%d: Got error when not expecting one: %v", i, err)
 		}
@@ -276,11 +288,11 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs) error) {
 }
 
 func TestCheck(t *testing.T) {
-	testCheck(t, fs.Check)
+	testCheck(t, operations.Check)
 }
 
 func TestCheckDownload(t *testing.T) {
-	testCheck(t, fs.CheckDownload)
+	testCheck(t, operations.CheckDownload)
 }
 
 func TestCheckSizeOnly(t *testing.T) {
@@ -296,7 +308,7 @@ func skipIfCantDedupe(t *testing.T, f fs.Fs) {
 	if !f.Features().DuplicateFiles {
 		t.Skip("Can't test deduplicate - no duplicate files possible")
 	}
-	if !f.Hashes().Contains(fs.HashMD5) {
+	if !f.Hashes().Contains(hash.HashMD5) {
 		t.Skip("Can't test deduplicate - MD5 not supported")
 	}
 }
@@ -311,7 +323,7 @@ func TestDeduplicateInteractive(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one", "This is one", t1)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateInteractive)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateInteractive)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -327,7 +339,7 @@ func TestDeduplicateSkip(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one", "This is another one", t1)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateSkip)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateSkip)
 	require.NoError(t, err)
 
 	r.CheckWithDuplicates(t, file1, file3)
@@ -343,10 +355,10 @@ func TestDeduplicateFirst(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one", "This is one BB", t1)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateFirst)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateFirst)
 	require.NoError(t, err)
 
-	objects, size, err := fs.Count(r.Fremote)
+	objects, size, err := operations.Count(r.Fremote)
 	require.NoError(t, err)
 	assert.Equal(t, int64(1), objects)
 	if size != file1.Size && size != file2.Size && size != file3.Size {
@@ -364,7 +376,7 @@ func TestDeduplicateNewest(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one", "This is another one", t3)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateNewest)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateNewest)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Fremote, file3)
@@ -380,7 +392,7 @@ func TestDeduplicateOldest(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one", "This is another one", t3)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateOldest)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateOldest)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -396,10 +408,10 @@ func TestDeduplicateRename(t *testing.T) {
 	file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3)
 	r.CheckWithDuplicates(t, file1, file2, file3)
 
-	err := fs.Deduplicate(r.Fremote, fs.DeduplicateRename)
+	err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
 	require.NoError(t, err)
 
-	require.NoError(t, fs.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
+	require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			return err
 		}
@@ -434,7 +446,7 @@ func TestMergeDirs(t *testing.T) {
 	file2 := r.WriteObject("dupe2/two.txt", "This is one too", t2)
 	file3 := r.WriteObject("dupe3/three.txt", "This is another one", t3)
 
-	objs, dirs, err := fs.WalkGetAll(r.Fremote, "", true, 1)
+	objs, dirs, err := walk.GetAll(r.Fremote, "", true, 1)
 	require.NoError(t, err)
 	assert.Equal(t, 3, len(dirs))
 	assert.Equal(t, 0, len(objs))
@@ -446,7 +458,7 @@ func TestMergeDirs(t *testing.T) {
 	file3.Path = "dupe1/three.txt"
 	fstest.CheckItems(t, r.Fremote, file1, file2, file3)
 
-	objs, dirs, err = fs.WalkGetAll(r.Fremote, "", true, 1)
+	objs, dirs, err = walk.GetAll(r.Fremote, "", true, 1)
 	require.NoError(t, err)
 	assert.Equal(t, 1, len(dirs))
 	assert.Equal(t, 0, len(objs))
@@ -473,7 +485,7 @@ func TestCat(t *testing.T) {
 		{1, 3, "BCD", "123"},
 	} {
 		var buf bytes.Buffer
-		err := fs.Cat(r.Fremote, &buf, test.offset, test.count)
+		err := operations.Cat(r.Fremote, &buf, test.offset, test.count)
 		require.NoError(t, err)
 		res := buf.String()
 
@@ -506,11 +518,11 @@ func TestRcat(t *testing.T) {
 		path2 := prefix + "big_file_from_pipe"
 
 		in := ioutil.NopCloser(strings.NewReader(data1))
-		_, err := fs.Rcat(r.Fremote, path1, in, t1)
+		_, err := operations.Rcat(r.Fremote, path1, in, t1)
 		require.NoError(t, err)
 
 		in = ioutil.NopCloser(strings.NewReader(data2))
-		_, err = fs.Rcat(r.Fremote, path2, in, t2)
+		_, err = operations.Rcat(r.Fremote, path2, in, t2)
 		require.NoError(t, err)
 
 		file1 := fstest.NewItem(path1, data1, t1)
@@ -531,13 +543,13 @@ func TestRmdirsNoLeaveRoot(t *testing.T) {
 	r.ForceMkdir(r.Fremote)
 	file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1)
 	//..and dirs we expect to delete
-	require.NoError(t, fs.Mkdir(r.Fremote, "A2"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1/B2"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1/B2/C2"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1/C3"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A3"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A3/B3"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A3/B3/C4"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A2"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A3"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4"))
 	//..and one more file at the end
 	file2 := r.WriteObject("A1/two", "bbb", t2)
 
@@ -562,7 +574,7 @@ func TestRmdirsNoLeaveRoot(t *testing.T) {
 		fs.Config.ModifyWindow,
 	)
 
-	require.NoError(t, fs.Rmdirs(r.Fremote, "", false))
+	require.NoError(t, operations.Rmdirs(r.Fremote, "", false))
 
 	fstest.CheckListingWithPrecision(
 		t,
@@ -587,9 +599,9 @@ func TestRmdirsLeaveRoot(t *testing.T) {
 
 	r.ForceMkdir(r.Fremote)
 
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "A1/B1/C1"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C1"))
 
 	fstest.CheckListingWithPrecision(
 		t,
@@ -603,7 +615,7 @@ func TestRmdirsLeaveRoot(t *testing.T) {
 		fs.Config.ModifyWindow,
 	)
 
-	require.NoError(t, fs.Rmdirs(r.Fremote, "A1", true))
+	require.NoError(t, operations.Rmdirs(r.Fremote, "A1", true))
 
 	fstest.CheckListingWithPrecision(
 		t,
@@ -626,7 +638,7 @@ func TestMoveFile(t *testing.T) {
 	file2 := file1
 	file2.Path = "sub/file2"
 
-	err := fs.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
+	err := operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal)
 	fstest.CheckItems(t, r.Fremote, file2)
@@ -634,12 +646,12 @@ func TestMoveFile(t *testing.T) {
 	r.WriteFile("file1", "file1 contents", t1)
 	fstest.CheckItems(t, r.Flocal, file1)
 
-	err = fs.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
+	err = operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	err = fs.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
+	err = operations.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal)
 	fstest.CheckItems(t, r.Fremote, file2)
@@ -655,17 +667,17 @@ func TestCopyFile(t *testing.T) {
 	file2 := file1
 	file2.Path = "sub/file2"
 
-	err := fs.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
+	err := operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	err = fs.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
+	err = operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	err = fs.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
+	err = operations.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
@@ -677,7 +689,7 @@ type testFsInfo struct {
 	root      string
 	stringVal string
 	precision time.Duration
-	hashes    fs.HashSet
+	hashes    hash.Set
 	features  fs.Features
 }
 
@@ -694,7 +706,7 @@ func (i *testFsInfo) String() string { return i.stringVal }
 func (i *testFsInfo) Precision() time.Duration { return i.precision }
 
 // Returns the supported hash types of the filesystem
-func (i *testFsInfo) Hashes() fs.HashSet { return i.hashes }
+func (i *testFsInfo) Hashes() hash.Set { return i.hashes }
 
 // Returns the optional features of the filesystem
 func (i *testFsInfo) Features() *fs.Features { return &i.features }
@@ -712,9 +724,9 @@ func TestSameConfig(t *testing.T) {
 		{"namey", "roott", false},
 	} {
 		b := &testFsInfo{name: test.name, root: test.root}
-		actual := fs.SameConfig(a, b)
+		actual := operations.SameConfig(a, b)
 		assert.Equal(t, test.expected, actual)
-		actual = fs.SameConfig(b, a)
+		actual = operations.SameConfig(b, a)
 		assert.Equal(t, test.expected, actual)
 	}
 }
@@ -732,9 +744,9 @@ func TestSame(t *testing.T) {
 		{"namey", "roott", false},
 	} {
 		b := &testFsInfo{name: test.name, root: test.root}
-		actual := fs.Same(a, b)
+		actual := operations.Same(a, b)
 		assert.Equal(t, test.expected, actual)
-		actual = fs.Same(b, a)
+		actual = operations.Same(b, a)
 		assert.Equal(t, test.expected, actual)
 	}
 }
@@ -758,137 +770,13 @@ func TestOverlapping(t *testing.T) {
 	} {
 		b := &testFsInfo{name: test.name, root: test.root}
 		what := fmt.Sprintf("(%q,%q) vs (%q,%q)", a.name, a.root, b.name, b.root)
-		actual := fs.Overlapping(a, b)
+		actual := operations.Overlapping(a, b)
 		assert.Equal(t, test.expected, actual, what)
-		actual = fs.Overlapping(b, a)
+		actual = operations.Overlapping(b, a)
 		assert.Equal(t, test.expected, actual, what)
 	}
 }
 
-func TestListDirSorted(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-
-	fs.Config.Filter.MaxSize = 10
-	defer func() {
-		fs.Config.Filter.MaxSize = -1
-	}()
-
-	files := []fstest.Item{
-		r.WriteObject("a.txt", "hello world", t1),
-		r.WriteObject("zend.txt", "hello", t1),
-		r.WriteObject("sub dir/hello world", "hello world", t1),
-		r.WriteObject("sub dir/hello world2", "hello world", t1),
-		r.WriteObject("sub dir/ignore dir/.ignore", "", t1),
-		r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1),
-		r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1),
-	}
-	fstest.CheckItems(t, r.Fremote, files...)
-	var items fs.DirEntries
-	var err error
-
-	// Turn the DirEntry into a name, ending with a / if it is a
-	// dir
-	str := func(i int) string {
-		item := items[i]
-		name := item.Remote()
-		switch item.(type) {
-		case fs.Object:
-		case fs.Directory:
-			name += "/"
-		default:
-			t.Fatalf("Unknown type %+v", item)
-		}
-		return name
-	}
-
-	items, err = fs.ListDirSorted(r.Fremote, true, "")
-	require.NoError(t, err)
-	require.Len(t, items, 3)
-	assert.Equal(t, "a.txt", str(0))
-	assert.Equal(t, "sub dir/", str(1))
-	assert.Equal(t, "zend.txt", str(2))
-
-	items, err = fs.ListDirSorted(r.Fremote, false, "")
-	require.NoError(t, err)
-	require.Len(t, items, 2)
-	assert.Equal(t, "sub dir/", str(0))
-	assert.Equal(t, "zend.txt", str(1))
-
-	items, err = fs.ListDirSorted(r.Fremote, true, "sub dir")
-	require.NoError(t, err)
-	require.Len(t, items, 4)
-	assert.Equal(t, "sub dir/hello world", str(0))
-	assert.Equal(t, "sub dir/hello world2", str(1))
-	assert.Equal(t, "sub dir/ignore dir/", str(2))
-	assert.Equal(t, "sub dir/sub sub dir/", str(3))
-
-	items, err = fs.ListDirSorted(r.Fremote, false, "sub dir")
-	require.NoError(t, err)
-	require.Len(t, items, 2)
-	assert.Equal(t, "sub dir/ignore dir/", str(0))
-	assert.Equal(t, "sub dir/sub sub dir/", str(1))
-
-	// testing ignore file
-	fs.Config.Filter.ExcludeFile = ".ignore"
-
-	items, err = fs.ListDirSorted(r.Fremote, false, "sub dir")
-	require.NoError(t, err)
-	require.Len(t, items, 1)
-	assert.Equal(t, "sub dir/sub sub dir/", str(0))
-
-	items, err = fs.ListDirSorted(r.Fremote, false, "sub dir/ignore dir")
-	require.NoError(t, err)
-	require.Len(t, items, 0)
-
-	items, err = fs.ListDirSorted(r.Fremote, true, "sub dir/ignore dir")
-	require.NoError(t, err)
-	require.Len(t, items, 2)
-	assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
-	assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
-
-	fs.Config.Filter.ExcludeFile = ""
-	items, err = fs.ListDirSorted(r.Fremote, false, "sub dir/ignore dir")
-	require.NoError(t, err)
-	require.Len(t, items, 2)
-	assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
-	assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
-}
-
-type byteReader struct {
-	c byte
-}
-
-func (br *byteReader) Read(p []byte) (n int, err error) {
-	if br.c == 0 {
-		err = io.EOF
-	} else if len(p) >= 1 {
-		p[0] = br.c
-		n = 1
-		br.c--
-	}
-	return
-}
-
-func TestReadFill(t *testing.T) {
-	buf := []byte{9, 9, 9, 9, 9}
-
-	n, err := fs.ReadFill(&byteReader{0}, buf)
-	assert.Equal(t, io.EOF, err)
-	assert.Equal(t, 0, n)
-	assert.Equal(t, []byte{9, 9, 9, 9, 9}, buf)
-
-	n, err = fs.ReadFill(&byteReader{3}, buf)
-	assert.Equal(t, io.EOF, err)
-	assert.Equal(t, 3, n)
-	assert.Equal(t, []byte{3, 2, 1, 9, 9}, buf)
-
-	n, err = fs.ReadFill(&byteReader{8}, buf)
-	assert.Equal(t, nil, err)
-	assert.Equal(t, 5, n)
-	assert.Equal(t, []byte{8, 7, 6, 5, 4}, buf)
-}
-
 type errorReader struct {
 	err error
 }
@@ -903,19 +791,19 @@ func TestCheckEqualReaders(t *testing.T) {
 	b65b[len(b65b)-1] = 1
 	b66 := make([]byte, 66*1024)
 
-	differ, err := fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
+	differ, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
 	assert.NoError(t, err)
 	assert.Equal(t, differ, false)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
 	assert.NoError(t, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
 	assert.NoError(t, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
 	assert.NoError(t, err)
 	assert.Equal(t, differ, true)
 
@@ -926,35 +814,35 @@ func TestCheckEqualReaders(t *testing.T) {
 		return io.MultiReader(r, e)
 	}
 
-	differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
+	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
+	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
+	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
+	differ, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 
-	differ, err = fs.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
+	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
 	assert.Equal(t, myErr, err)
 	assert.Equal(t, differ, true)
 }
@@ -967,50 +855,50 @@ func TestListFormat(t *testing.T) {
 
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 
-	items, _ := fs.ListDirSorted(r.Fremote, true, "")
-	var list fs.ListFormat
+	items, _ := list.DirSorted(r.Fremote, true, "")
+	var list operations.ListFormat
 	list.AddPath()
 	list.SetDirSlash(false)
-	assert.Equal(t, "subdir", fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "subdir", operations.ListFormatted(&items[1], &list))
 
 	list.SetDirSlash(true)
-	assert.Equal(t, "subdir/", fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "subdir/", operations.ListFormatted(&items[1], &list))
 
 	list.SetOutput(nil)
-	assert.Equal(t, "", fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "", operations.ListFormatted(&items[1], &list))
 
 	list.AppendOutput(func() string { return "a" })
 	list.AppendOutput(func() string { return "b" })
-	assert.Equal(t, "ab", fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "ab", operations.ListFormatted(&items[1], &list))
 	list.SetSeparator(":::")
-	assert.Equal(t, "a:::b", fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "a:::b", operations.ListFormatted(&items[1], &list))
 
 	list.SetOutput(nil)
 	list.AddModTime()
-	assert.Equal(t, items[0].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[0], &list))
+	assert.Equal(t, items[0].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[0], &list))
 
 	list.SetOutput(nil)
 	list.AddSize()
-	assert.Equal(t, "1", fs.ListFormatted(&items[0], &list))
+	assert.Equal(t, "1", operations.ListFormatted(&items[0], &list))
 
 	list.AddPath()
 	list.AddModTime()
 	list.SetDirSlash(true)
 	list.SetSeparator("__SEP__")
-	assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[0], &list))
-	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Format("2006-01-02 15:04:05"), fs.ListFormatted(&items[1], &list))
+	assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[0], &list))
+	assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Format("2006-01-02 15:04:05"), operations.ListFormatted(&items[1], &list))
 
 	for _, test := range []struct {
-		ht   fs.HashType
+		ht   hash.Type
 		want string
 	}{
-		{fs.HashMD5, "0cc175b9c0f1b6a831c399e269772661"},
-		{fs.HashSHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
-		{fs.HashDropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
+		{hash.HashMD5, "0cc175b9c0f1b6a831c399e269772661"},
+		{hash.HashSHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
+		{hash.HashDropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
 	} {
 		list.SetOutput(nil)
 		list.AddHash(test.ht)
-		got := fs.ListFormatted(&items[0], &list)
+		got := operations.ListFormatted(&items[0], &list)
 		if got != "UNSUPPORTED" && got != "" {
 			assert.Equal(t, test.want, got)
 		}
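
As a hedged aside (not part of the commit itself): the relocated walk API exercised in the tests above can be driven as in this minimal sketch, where the remote string is a placeholder and the backend registration import follows the new layout:

    package main

    import (
    	"log"

    	_ "github.com/ncw/rclone/backend/all" // register all backends
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/walk"
    )

    func main() {
    	f, err := fs.NewFs("remote:path") // hypothetical configured remote
    	if err != nil {
    		log.Fatal(err)
    	}
    	// includeAll=true bypasses filters and maxLevel=-1 walks the whole
    	// tree, matching the calls in the tests above.
    	err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
    		if err != nil {
    			return err
    		}
    		fs.Infof(nil, "%q has %d entries", dirPath, len(entries))
    		return nil
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
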
diff --git a/fs/operations_internal_test.go b/fs/operations_internal_test.go
deleted file mode 100644
index d9745ca6f..000000000
--- a/fs/operations_internal_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Internal tests for operations
-
-package fs
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestFilterAndSortIncludeAll(t *testing.T) {
-	da := newDir("a")
-	oA := mockObject("A")
-	db := newDir("b")
-	oB := mockObject("B")
-	dc := newDir("c")
-	oC := mockObject("C")
-	dd := newDir("d")
-	oD := mockObject("D")
-	entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
-	includeObject := func(o Object) bool {
-		return o != oB
-	}
-	includeDirectory := func(remote string) (bool, error) {
-		return remote != "c", nil
-	}
-	// no filter
-	newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory)
-	require.NoError(t, err)
-	assert.Equal(t,
-		newEntries,
-		DirEntries{oA, oB, oC, oD, da, db, dc, dd},
-	)
-	// filter
-	newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory)
-	require.NoError(t, err)
-	assert.Equal(t,
-		newEntries,
-		DirEntries{oA, oC, oD, da, db, dd},
-	)
-}
-
-func TestFilterAndSortCheckDir(t *testing.T) {
-	// Check the different kinds of error when listing "dir"
-	da := newDir("dir/")
-	oA := mockObject("diR/a")
-	db := newDir("dir/b")
-	oB := mockObject("dir/B/sub")
-	dc := newDir("dir/c")
-	oC := mockObject("dir/C")
-	dd := newDir("dir/d")
-	oD := mockObject("dir/D")
-	entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
-	newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil)
-	require.NoError(t, err)
-	assert.Equal(t,
-		newEntries,
-		DirEntries{oC, oD, db, dc, dd},
-	)
-}
-
-func TestFilterAndSortCheckDirRoot(t *testing.T) {
-	// Check the different kinds of error when listing the root ""
-	da := newDir("")
-	oA := mockObject("A")
-	db := newDir("b")
-	oB := mockObject("B/sub")
-	dc := newDir("c")
-	oC := mockObject("C")
-	dd := newDir("d")
-	oD := mockObject("D")
-	entries := DirEntries{da, oA, db, oB, dc, oC, dd, oD}
-	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
-	require.NoError(t, err)
-	assert.Equal(t,
-		newEntries,
-		DirEntries{oA, oC, oD, db, dc, dd},
-	)
-}
-
-func TestFilterAndSortUnknown(t *testing.T) {
-	// Check that an unknown entry produces an error
-	da := newDir("")
-	oA := mockObject("A")
-	ub := unknownDirEntry("b")
-	oB := mockObject("B/sub")
-	entries := DirEntries{da, oA, ub, oB}
-	newEntries, err := filterAndSortDir(entries, true, "", nil, nil)
-	assert.Error(t, err, "error")
-	assert.Nil(t, newEntries)
-}
diff --git a/fs/options.go b/fs/options.go
index 0d6f97962..0e3fa9a07 100644
--- a/fs/options.go
+++ b/fs/options.go
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"net/http"
 	"strconv"
+
+	"github.com/ncw/rclone/fs/hash"
 )
 
 // OpenOption is an interface describing options for Open
@@ -97,7 +99,7 @@ func (o *HTTPOption) Mandatory() bool {
 // HashesOption defines an option used to tell the local fs to limit
 // the number of hashes it calculates.
 type HashesOption struct {
-	Hashes HashSet
+	Hashes hash.Set
 }
 
 // Header formats the option as an http header
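
A hedged sketch of the relocated type in use. Everything below is illustrative: the remote, the object name, and hash.NewHashSet are assumptions rather than something this diff shows:

    package main

    import (
    	"io/ioutil"
    	"log"

    	_ "github.com/ncw/rclone/backend/all" // register all backends
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/hash"
    )

    func main() {
    	f, err := fs.NewFs("remote:path") // hypothetical configured remote
    	if err != nil {
    		log.Fatal(err)
    	}
    	obj, err := f.NewObject("file.txt") // hypothetical object
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Ask the backend to compute only MD5 while reading (assumes
    	// hash.NewHashSet exists in the new fs/hash package).
    	in, err := obj.Open(&fs.HashesOption{Hashes: hash.NewHashSet(hash.HashMD5)})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer func() { _ = in.Close() }()
    	data, err := ioutil.ReadAll(in)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("read %d bytes", len(data))
    }
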
diff --git a/fs/parseduration.go b/fs/parseduration.go
new file mode 100644
index 000000000..679cb0921
--- /dev/null
+++ b/fs/parseduration.go
@@ -0,0 +1,68 @@
+package fs
+
+import (
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Duration is a time.Duration with some more parsing options
+type Duration time.Duration
+
+// Turn Duration into a string
+func (d Duration) String() string {
+	return time.Duration(d).String()
+}
+
+// ageSuffixes maps each accepted suffix to its multiplier, using time conventions
+var ageSuffixes = []struct {
+	Suffix     string
+	Multiplier time.Duration
+}{
+	{Suffix: "ms", Multiplier: time.Millisecond},
+	{Suffix: "s", Multiplier: time.Second},
+	{Suffix: "m", Multiplier: time.Minute},
+	{Suffix: "h", Multiplier: time.Hour},
+	{Suffix: "d", Multiplier: time.Hour * 24},
+	{Suffix: "w", Multiplier: time.Hour * 24 * 7},
+	{Suffix: "M", Multiplier: time.Hour * 24 * 30},
+	{Suffix: "y", Multiplier: time.Hour * 24 * 365},
+
+	// Default to second
+	{Suffix: "", Multiplier: time.Second},
+}
+
+// ParseDuration parses a duration string. It accepts ms|s|m|h|d|w|M|y suffixes and defaults to seconds if no suffix is given.
+func ParseDuration(age string) (time.Duration, error) {
+	var period float64
+
+	for _, ageSuffix := range ageSuffixes {
+		if strings.HasSuffix(age, ageSuffix.Suffix) {
+			numberString := age[:len(age)-len(ageSuffix.Suffix)]
+			var err error
+			period, err = strconv.ParseFloat(numberString, 64)
+			if err != nil {
+				return time.Duration(0), err
+			}
+			period *= float64(ageSuffix.Multiplier)
+			break
+		}
+	}
+
+	return time.Duration(period), nil
+}
+
+// Set a Duration
+func (d *Duration) Set(s string) error {
+	duration, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = Duration(duration)
+	return nil
+}
+
+// Type of the value
+func (d Duration) Type() string {
+	return "time.Duration"
+}
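
A minimal sketch (illustrative, not part of the commit) of the parser above in use:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncw/rclone/fs"
    )

    func main() {
    	// "1.5d" uses the day suffix from ageSuffixes, so 1.5 * 24h = 36h.
    	d, err := fs.ParseDuration("1.5d")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(d) // 36h0m0s
    }
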
diff --git a/fs/parseduration_test.go b/fs/parseduration_test.go
new file mode 100644
index 000000000..7c9d7240b
--- /dev/null
+++ b/fs/parseduration_test.go
@@ -0,0 +1,44 @@
+package fs
+
+import (
+	"testing"
+	"time"
+
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Check it satisfies the interface
+var _ pflag.Value = (*Duration)(nil)
+
+func TestParseDuration(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want float64
+		err  bool
+	}{
+		{"0", 0, false},
+		{"", 0, true},
+		{"1ms", float64(time.Millisecond), false},
+		{"1s", float64(time.Second), false},
+		{"1m", float64(time.Minute), false},
+		{"1h", float64(time.Hour), false},
+		{"1d", float64(time.Hour) * 24, false},
+		{"1w", float64(time.Hour) * 24 * 7, false},
+		{"1M", float64(time.Hour) * 24 * 30, false},
+		{"1y", float64(time.Hour) * 24 * 365, false},
+		{"1.5y", float64(time.Hour) * 24 * 365 * 1.5, false},
+		{"-1s", -float64(time.Second), false},
+		{"1.s", float64(time.Second), false},
+		{"1x", 0, true},
+	} {
+		duration, err := ParseDuration(test.in)
+		if test.err {
+			require.Error(t, err)
+		} else {
+			require.NoError(t, err)
+		}
+		assert.Equal(t, test.want, float64(duration))
+	}
+}
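
Because the test above asserts that *Duration satisfies pflag.Value, the type can back a command line flag directly; a minimal sketch with a hypothetical flag name:

    package main

    import (
    	"fmt"

    	"github.com/ncw/rclone/fs"
    	"github.com/spf13/pflag"
    )

    func main() {
    	var maxAge fs.Duration
    	pflag.Var(&maxAge, "max-age", "accepts ms|s|m|h|d|w|M|y suffixes")
    	pflag.Parse()
    	fmt.Println(maxAge) // printed via Duration.String()
    }
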
diff --git a/fs/sizesuffix.go b/fs/sizesuffix.go
new file mode 100644
index 000000000..14652c3d5
--- /dev/null
+++ b/fs/sizesuffix.go
@@ -0,0 +1,102 @@
+package fs
+
+// SizeSuffix is parsed by flag with k/M/G suffixes
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// SizeSuffix is an int64 with a friendly way of being printed and set from a string
+type SizeSuffix int64
+
+// Turn SizeSuffix into a string and a suffix
+func (x SizeSuffix) string() (string, string) {
+	scaled := float64(0)
+	suffix := ""
+	switch {
+	case x < 0:
+		return "off", ""
+	case x == 0:
+		return "0", ""
+	case x < 1024:
+		scaled = float64(x)
+		suffix = ""
+	case x < 1024*1024:
+		scaled = float64(x) / 1024
+		suffix = "k"
+	case x < 1024*1024*1024:
+		scaled = float64(x) / 1024 / 1024
+		suffix = "M"
+	default:
+		scaled = float64(x) / 1024 / 1024 / 1024
+		suffix = "G"
+	}
+	if math.Floor(scaled) == scaled {
+		return fmt.Sprintf("%.0f", scaled), suffix
+	}
+	return fmt.Sprintf("%.3f", scaled), suffix
+}
+
+// String turns SizeSuffix into a string
+func (x SizeSuffix) String() string {
+	val, suffix := x.string()
+	return val + suffix
+}
+
+// Unit turns SizeSuffix into a string with a unit
+func (x SizeSuffix) Unit(unit string) string {
+	val, suffix := x.string()
+	if val == "off" {
+		return val
+	}
+	return val + " " + suffix + unit
+}
+
+// Set a SizeSuffix
+func (x *SizeSuffix) Set(s string) error {
+	if len(s) == 0 {
+		return errors.New("empty string")
+	}
+	if strings.ToLower(s) == "off" {
+		*x = -1
+		return nil
+	}
+	suffix := s[len(s)-1]
+	suffixLen := 1
+	var multiplier float64
+	switch suffix {
+	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
+		suffixLen = 0
+		multiplier = 1 << 10
+	case 'b', 'B':
+		multiplier = 1
+	case 'k', 'K':
+		multiplier = 1 << 10
+	case 'm', 'M':
+		multiplier = 1 << 20
+	case 'g', 'G':
+		multiplier = 1 << 30
+	default:
+		return errors.Errorf("bad suffix %q", suffix)
+	}
+	s = s[:len(s)-suffixLen]
+	value, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return err
+	}
+	if value < 0 {
+		return errors.Errorf("size can't be negative %q", s)
+	}
+	value *= multiplier
+	*x = SizeSuffix(value)
+	return nil
+}
+
+// Type of the value
+func (x *SizeSuffix) Type() string {
+	return "int64"
+}
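
For illustration (a sketch, not part of the commit): parsing and printing with SizeSuffix. Note that per the Set switch above, a bare number defaults to the k multiplier:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncw/rclone/fs"
    )

    func main() {
    	var s fs.SizeSuffix
    	if err := s.Set("0.1k"); err != nil { // 0.1 * 1024 = 102 bytes
    		log.Fatal(err)
    	}
    	fmt.Println(int64(s), s.String(), s.Unit("Bytes")) // 102 102 102 Bytes
    }
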
diff --git a/fs/sizesuffix_test.go b/fs/sizesuffix_test.go
new file mode 100644
index 000000000..45f8baa15
--- /dev/null
+++ b/fs/sizesuffix_test.go
@@ -0,0 +1,90 @@
+package fs
+
+import (
+	"testing"
+
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// Check it satisfies the interface
+var _ pflag.Value = (*SizeSuffix)(nil)
+
+func TestSizeSuffixString(t *testing.T) {
+	for _, test := range []struct {
+		in   float64
+		want string
+	}{
+		{0, "0"},
+		{102, "102"},
+		{1024, "1k"},
+		{1024 * 1024, "1M"},
+		{1024 * 1024 * 1024, "1G"},
+		{10 * 1024 * 1024 * 1024, "10G"},
+		{10.1 * 1024 * 1024 * 1024, "10.100G"},
+		{-1, "off"},
+		{-100, "off"},
+	} {
+		ss := SizeSuffix(test.in)
+		got := ss.String()
+		assert.Equal(t, test.want, got)
+	}
+}
+
+func TestSizeSuffixUnit(t *testing.T) {
+	for _, test := range []struct {
+		in   float64
+		want string
+	}{
+		{0, "0 Bytes"},
+		{102, "102 Bytes"},
+		{1024, "1 kBytes"},
+		{1024 * 1024, "1 MBytes"},
+		{1024 * 1024 * 1024, "1 GBytes"},
+		{10 * 1024 * 1024 * 1024, "10 GBytes"},
+		{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
+		{-1, "off"},
+		{-100, "off"},
+	} {
+		ss := SizeSuffix(test.in)
+		got := ss.Unit("Bytes")
+		assert.Equal(t, test.want, got)
+	}
+}
+
+func TestSizeSuffixSet(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want int64
+		err  bool
+	}{
+		{"0", 0, false},
+		{"1b", 1, false},
+		{"102B", 102, false},
+		{"0.1k", 102, false},
+		{"0.1", 102, false},
+		{"1K", 1024, false},
+		{"1", 1024, false},
+		{"2.5", 1024 * 2.5, false},
+		{"1M", 1024 * 1024, false},
+		{"1.g", 1024 * 1024 * 1024, false},
+		{"10G", 10 * 1024 * 1024 * 1024, false},
+		{"off", -1, false},
+		{"OFF", -1, false},
+		{"", 0, true},
+		{"1p", 0, true},
+		{"1.p", 0, true},
+		{"1p", 0, true},
+		{"-1K", 0, true},
+	} {
+		ss := SizeSuffix(0)
+		err := ss.Set(test.in)
+		if test.err {
+			require.Error(t, err)
+		} else {
+			require.NoError(t, err)
+		}
+		assert.Equal(t, test.want, int64(ss))
+	}
+}
diff --git a/fs/sync.go b/fs/sync/sync.go
similarity index 52%
rename from fs/sync.go
rename to fs/sync/sync.go
index 31918c1a3..66d84c6c6 100644
--- a/fs/sync.go
+++ b/fs/sync/sync.go
@@ -1,65 +1,68 @@
-// Implementation of sync/copy/move
-
-package fs
+// Package sync is the implementation of sync/copy/move
+package sync
 
 import (
 	"fmt"
 	"sort"
 	"sync"
-	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/march"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
-var oldSyncMethod = BoolP("old-sync-method", "", false, "Deprecated - use --fast-list instead")
-
 type syncCopyMove struct {
 	// parameters
-	fdst               Fs
-	fsrc               Fs
-	deleteMode         DeleteMode // how we are doing deletions
+	fdst               fs.Fs
+	fsrc               fs.Fs
+	deleteMode         fs.DeleteMode // how we are doing deletions
 	DoMove             bool
 	deleteEmptySrcDirs bool
 	dir                string
 	// internal state
-	ctx            context.Context     // internal context for controlling go-routines
-	cancel         func()              // cancel the context
-	noTraverse     bool                // if set don't trafevers the dst
-	deletersWg     sync.WaitGroup      // for delete before go routine
-	deleteFilesCh  chan Object         // channel to receive deletes if delete before
-	trackRenames   bool                // set if we should do server side renames
-	dstFilesMu     sync.Mutex          // protect dstFiles
-	dstFiles       map[string]Object   // dst files, always filled
-	srcFiles       map[string]Object   // src files, only used if deleteBefore
-	srcFilesChan   chan Object         // passes src objects
-	srcFilesResult chan error          // error result of src listing
-	dstFilesResult chan error          // error result of dst listing
-	dstEmptyDirsMu sync.Mutex          // protect dstEmptyDirs
-	dstEmptyDirs   []DirEntry          // potentially empty directories
-	srcEmptyDirsMu sync.Mutex          // protect srcEmptyDirs
-	srcEmptyDirs   []DirEntry          // potentially empty directories
-	checkerWg      sync.WaitGroup      // wait for checkers
-	toBeChecked    ObjectPairChan      // checkers channel
-	transfersWg    sync.WaitGroup      // wait for transfers
-	toBeUploaded   ObjectPairChan      // copiers channel
-	errorMu        sync.Mutex          // Mutex covering the errors variables
-	err            error               // normal error from copy process
-	noRetryErr     error               // error with NoRetry set
-	fatalErr       error               // fatal error
-	commonHash     HashType            // common hash type between src and dst
-	renameMapMu    sync.Mutex          // mutex to protect the below
-	renameMap      map[string][]Object // dst files by hash - only used by trackRenames
-	renamerWg      sync.WaitGroup      // wait for renamers
-	toBeRenamed    ObjectPairChan      // renamers channel
-	trackRenamesWg sync.WaitGroup      // wg for background track renames
-	trackRenamesCh chan Object         // objects are pumped in here
-	renameCheck    []Object            // accumulate files to check for rename here
-	backupDir      Fs                  // place to store overwrites/deletes
-	suffix         string              // suffix to add to files placed in backupDir
+	ctx            context.Context        // internal context for controlling goroutines
+	cancel         func()                 // cancel the context
+	noTraverse     bool                   // if set don't traverse the dst
+	deletersWg     sync.WaitGroup         // for delete before go routine
+	deleteFilesCh  chan fs.Object         // channel to receive deletes if delete before
+	trackRenames   bool                   // set if we should do server side renames
+	dstFilesMu     sync.Mutex             // protect dstFiles
+	dstFiles       map[string]fs.Object   // dst files, always filled
+	srcFiles       map[string]fs.Object   // src files, only used if deleteBefore
+	srcFilesChan   chan fs.Object         // passes src objects
+	srcFilesResult chan error             // error result of src listing
+	dstFilesResult chan error             // error result of dst listing
+	dstEmptyDirsMu sync.Mutex             // protect dstEmptyDirs
+	dstEmptyDirs   []fs.DirEntry          // potentially empty directories
+	srcEmptyDirsMu sync.Mutex             // protect srcEmptyDirs
+	srcEmptyDirs   []fs.DirEntry          // potentially empty directories
+	checkerWg      sync.WaitGroup         // wait for checkers
+	toBeChecked    fs.ObjectPairChan      // checkers channel
+	transfersWg    sync.WaitGroup         // wait for transfers
+	toBeUploaded   fs.ObjectPairChan      // copiers channel
+	errorMu        sync.Mutex             // Mutex covering the errors variables
+	err            error                  // normal error from copy process
+	noRetryErr     error                  // error with NoRetry set
+	fatalErr       error                  // fatal error
+	commonHash     hash.Type              // common hash type between src and dst
+	renameMapMu    sync.Mutex             // mutex to protect the below
+	renameMap      map[string][]fs.Object // dst files by hash - only used by trackRenames
+	renamerWg      sync.WaitGroup         // wait for renamers
+	toBeRenamed    fs.ObjectPairChan      // renamers channel
+	trackRenamesWg sync.WaitGroup         // wg for background track renames
+	trackRenamesCh chan fs.Object         // objects are pumped in here
+	renameCheck    []fs.Object            // accumulate files to check for rename here
+	backupDir      fs.Fs                  // place to store overwrites/deletes
+	suffix         string                 // suffix to add to files placed in backupDir
 }
 
-func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
+func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
 	s := &syncCopyMove{
 		fdst:               fdst,
 		fsrc:               fsrc,
@@ -67,64 +70,64 @@ func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
 		DoMove:             DoMove,
 		deleteEmptySrcDirs: deleteEmptySrcDirs,
 		dir:                "",
-		srcFilesChan:       make(chan Object, Config.Checkers+Config.Transfers),
+		srcFilesChan:       make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
 		srcFilesResult:     make(chan error, 1),
 		dstFilesResult:     make(chan error, 1),
-		noTraverse:         Config.NoTraverse,
-		toBeChecked:        make(ObjectPairChan, Config.Transfers),
-		toBeUploaded:       make(ObjectPairChan, Config.Transfers),
-		deleteFilesCh:      make(chan Object, Config.Checkers),
-		trackRenames:       Config.TrackRenames,
+		noTraverse:         fs.Config.NoTraverse,
+		toBeChecked:        make(fs.ObjectPairChan, fs.Config.Transfers),
+		toBeUploaded:       make(fs.ObjectPairChan, fs.Config.Transfers),
+		deleteFilesCh:      make(chan fs.Object, fs.Config.Checkers),
+		trackRenames:       fs.Config.TrackRenames,
 		commonHash:         fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
-		toBeRenamed:        make(ObjectPairChan, Config.Transfers),
-		trackRenamesCh:     make(chan Object, Config.Checkers),
+		toBeRenamed:        make(fs.ObjectPairChan, fs.Config.Transfers),
+		trackRenamesCh:     make(chan fs.Object, fs.Config.Checkers),
 	}
 	s.ctx, s.cancel = context.WithCancel(context.Background())
-	if s.noTraverse && s.deleteMode != DeleteModeOff {
-		Errorf(nil, "Ignoring --no-traverse with sync")
+	if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
+		fs.Errorf(nil, "Ignoring --no-traverse with sync")
 		s.noTraverse = false
 	}
 	if s.trackRenames {
 		// Don't track renames for remotes without server-side move support.
-		if !CanServerSideMove(fdst) {
-			Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
+		if !operations.CanServerSideMove(fdst) {
+			fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
 			s.trackRenames = false
 		}
-		if s.commonHash == HashNone {
-			Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
+		if s.commonHash == hash.HashNone {
+			fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
 			s.trackRenames = false
 		}
 	}
 	if s.trackRenames {
 		// track renames needs delete after
-		if s.deleteMode != DeleteModeOff {
-			s.deleteMode = DeleteModeAfter
+		if s.deleteMode != fs.DeleteModeOff {
+			s.deleteMode = fs.DeleteModeAfter
 		}
 		if s.noTraverse {
-			Errorf(nil, "Ignoring --no-traverse with --track-renames")
+			fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
 			s.noTraverse = false
 		}
 	}
 	// Make Fs for --backup-dir if required
-	if Config.BackupDir != "" {
+	if fs.Config.BackupDir != "" {
 		var err error
-		s.backupDir, err = NewFs(Config.BackupDir)
+		s.backupDir, err = fs.NewFs(fs.Config.BackupDir)
 		if err != nil {
-			return nil, FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", Config.BackupDir, err))
+			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
 		}
-		if !CanServerSideMove(s.backupDir) {
-			return nil, FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
+		if !operations.CanServerSideMove(s.backupDir) {
+			return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
 		}
-		if !SameConfig(fdst, s.backupDir) {
-			return nil, FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
+		if !operations.SameConfig(fdst, s.backupDir) {
+			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
 		}
-		if Overlapping(fdst, s.backupDir) {
-			return nil, FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
+		if operations.Overlapping(fdst, s.backupDir) {
+			return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
 		}
-		if Overlapping(fsrc, s.backupDir) {
-			return nil, FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
+		if operations.Overlapping(fsrc, s.backupDir) {
+			return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
 		}
-		s.suffix = Config.Suffix
+		s.suffix = fs.Config.Suffix
 	}
 	return s, nil
 }
@@ -141,7 +144,7 @@ func (s *syncCopyMove) aborting() bool {
 
 // This reads the map and pumps it into the channel passed in, closing
 // the channel at the end
-func (s *syncCopyMove) pumpMapToChan(files map[string]Object, out chan<- Object) {
+func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
 outer:
 	for _, o := range files {
 		if s.aborting() {
@@ -157,62 +160,6 @@ outer:
 	s.srcFilesResult <- nil
 }
 
-// NeedTransfer checks to see if src needs to be copied to dst using
-// the current config.
-//
-// Returns a flag which indicates whether the file needs to be
-// transferred or not.
-func NeedTransfer(dst, src Object) bool {
-	if dst == nil {
-		Debugf(src, "Couldn't find file - need to transfer")
-		return true
-	}
-	// If we should ignore existing files, don't transfer
-	if Config.IgnoreExisting {
-		Debugf(src, "Destination exists, skipping")
-		return false
-	}
-	// If we should upload unconditionally
-	if Config.IgnoreTimes {
-		Debugf(src, "Transferring unconditionally as --ignore-times is in use")
-		return true
-	}
-	// If UpdateOlder is in effect, skip if dst is newer than src
-	if Config.UpdateOlder {
-		srcModTime := src.ModTime()
-		dstModTime := dst.ModTime()
-		dt := dstModTime.Sub(srcModTime)
-		// If have a mutually agreed precision then use that
-		modifyWindow := Config.ModifyWindow
-		if modifyWindow == ModTimeNotSupported {
-			// Otherwise use 1 second as a safe default as
-			// the resolution of the time a file was
-			// uploaded.
-			modifyWindow = time.Second
-		}
-		switch {
-		case dt >= modifyWindow:
-			Debugf(src, "Destination is newer than source, skipping")
-			return false
-		case dt <= -modifyWindow:
-			Debugf(src, "Destination is older than source, transferring")
-		default:
-			if src.Size() == dst.Size() {
-				Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
-				return false
-			}
-			Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
-		}
-	} else {
-		// Check to see if changed or not
-		if Equal(src, dst) {
-			Debugf(src, "Unchanged skipping")
-			return false
-		}
-	}
-	return true
-}
-
 // This checks the types of errors returned while copying files
 func (s *syncCopyMove) processError(err error) {
 	if err == nil {
@@ -221,12 +168,12 @@ func (s *syncCopyMove) processError(err error) {
 	s.errorMu.Lock()
 	defer s.errorMu.Unlock()
 	switch {
-	case IsFatalError(err):
+	case fserrors.IsFatalError(err):
 		if !s.aborting() {
 			s.cancel()
 		}
 		s.fatalErr = err
-	case IsNoRetryError(err):
+	case fserrors.IsNoRetryError(err):
 		s.noRetryErr = err
 	default:
 		s.err = err
@@ -252,7 +199,7 @@ func (s *syncCopyMove) currentError() error {
 // pairChecker reads ObjectPairs on in and sends them to out if they need transferring.
 //
 // FIXME potentially doing lots of hashes at once
-func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairChecker(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
 	defer wg.Done()
 	for {
 		if s.aborting() {
@@ -263,26 +210,26 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
 			if !ok {
 				return
 			}
-			src := pair.src
-			Stats.Checking(src.Remote())
+			src := pair.Src
+			accounting.Stats.Checking(src.Remote())
 			// Check to see if can store this
 			if src.Storable() {
-				if NeedTransfer(pair.dst, pair.src) {
+				if operations.NeedTransfer(pair.Dst, pair.Src) {
 					// If files are treated as immutable, fail if destination exists and does not match
-					if Config.Immutable && pair.dst != nil {
-						Errorf(pair.dst, "Source and destination exist but do not match: immutable file modified")
-						s.processError(ErrorImmutableModified)
+					if fs.Config.Immutable && pair.Dst != nil {
+						fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
+						s.processError(fs.ErrorImmutableModified)
 					} else {
 						// If destination already exists, then we must move it into --backup-dir if required
-						if pair.dst != nil && s.backupDir != nil {
-							remoteWithSuffix := pair.dst.Remote() + s.suffix
+						if pair.Dst != nil && s.backupDir != nil {
+							remoteWithSuffix := pair.Dst.Remote() + s.suffix
 							overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
-							_, err := Move(s.backupDir, overwritten, remoteWithSuffix, pair.dst)
+							_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
 							if err != nil {
 								s.processError(err)
 							} else {
 								// If successful zero out the dst as it is no longer there and copy the file
-								pair.dst = nil
+								pair.Dst = nil
 								out <- pair
 							}
 						} else {
@@ -293,11 +240,11 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
 					// If moving need to delete the files we don't need to copy
 					if s.DoMove {
 						// Delete src if no error on copy
-						s.processError(DeleteFile(src))
+						s.processError(operations.DeleteFile(src))
 					}
 				}
 			}
-			Stats.DoneChecking(src.Remote())
+			accounting.Stats.DoneChecking(src.Remote())
 		case <-s.ctx.Done():
 			return
 		}
@@ -306,7 +253,7 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
 
 // pairRenamer reads ObjectPairs on in and attempts to rename them,
 // otherwise it sends them out if they need transferring.
-func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairRenamer(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
 	defer wg.Done()
 	for {
 		if s.aborting() {
@@ -317,7 +264,7 @@ func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sy
 			if !ok {
 				return
 			}
-			src := pair.src
+			src := pair.Src
 			if !s.tryRename(src) {
 				// pass on if not renamed
 				out <- pair
@@ -329,7 +276,7 @@ func (s *syncCopyMove) pairRenamer(in ObjectPairChan, out ObjectPairChan, wg *sy
 }
 
 // pairCopyOrMove reads Objects on in and moves or copies them.
-func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairCopyOrMove(in fs.ObjectPairChan, fdst fs.Fs, wg *sync.WaitGroup) {
 	defer wg.Done()
 	var err error
 	for {
@@ -341,15 +288,15 @@ func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitG
 			if !ok {
 				return
 			}
-			src := pair.src
-			Stats.Transferring(src.Remote())
+			src := pair.Src
+			accounting.Stats.Transferring(src.Remote())
 			if s.DoMove {
-				_, err = Move(fdst, pair.dst, src.Remote(), src)
+				_, err = operations.Move(fdst, pair.Dst, src.Remote(), src)
 			} else {
-				_, err = Copy(fdst, pair.dst, src.Remote(), src)
+				_, err = operations.Copy(fdst, pair.Dst, src.Remote(), src)
 			}
 			s.processError(err)
-			Stats.DoneTransferring(src.Remote(), err == nil)
+			accounting.Stats.DoneTransferring(src.Remote(), err == nil)
 		case <-s.ctx.Done():
 			return
 		}
@@ -358,8 +305,8 @@ func (s *syncCopyMove) pairCopyOrMove(in ObjectPairChan, fdst Fs, wg *sync.WaitG
 
 // This starts the background checkers.
 func (s *syncCopyMove) startCheckers() {
-	s.checkerWg.Add(Config.Checkers)
-	for i := 0; i < Config.Checkers; i++ {
+	s.checkerWg.Add(fs.Config.Checkers)
+	for i := 0; i < fs.Config.Checkers; i++ {
 		go s.pairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
 	}
 }
@@ -367,14 +314,14 @@ func (s *syncCopyMove) startCheckers() {
 // This stops the background checkers
 func (s *syncCopyMove) stopCheckers() {
 	close(s.toBeChecked)
-	Infof(s.fdst, "Waiting for checks to finish")
+	fs.Infof(s.fdst, "Waiting for checks to finish")
 	s.checkerWg.Wait()
 }
 
 // This starts the background transfers
 func (s *syncCopyMove) startTransfers() {
-	s.transfersWg.Add(Config.Transfers)
-	for i := 0; i < Config.Transfers; i++ {
+	s.transfersWg.Add(fs.Config.Transfers)
+	for i := 0; i < fs.Config.Transfers; i++ {
 		go s.pairCopyOrMove(s.toBeUploaded, s.fdst, &s.transfersWg)
 	}
 }
@@ -382,7 +329,7 @@ func (s *syncCopyMove) startTransfers() {
 // This stops the background transfers
 func (s *syncCopyMove) stopTransfers() {
 	close(s.toBeUploaded)
-	Infof(s.fdst, "Waiting for transfers to finish")
+	fs.Infof(s.fdst, "Waiting for transfers to finish")
 	s.transfersWg.Wait()
 }
 
@@ -391,8 +338,8 @@ func (s *syncCopyMove) startRenamers() {
 	if !s.trackRenames {
 		return
 	}
-	s.renamerWg.Add(Config.Checkers)
-	for i := 0; i < Config.Checkers; i++ {
+	s.renamerWg.Add(fs.Config.Checkers)
+	for i := 0; i < fs.Config.Checkers; i++ {
 		go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, &s.renamerWg)
 	}
 }
@@ -403,7 +350,7 @@ func (s *syncCopyMove) stopRenamers() {
 		return
 	}
 	close(s.toBeRenamed)
-	Infof(s.fdst, "Waiting for renames to finish")
+	fs.Infof(s.fdst, "Waiting for renames to finish")
 	s.renamerWg.Wait()
 }
 
@@ -432,20 +379,20 @@ func (s *syncCopyMove) stopTrackRenames() {
 
 // This starts the background deletion of files for --delete-during
 func (s *syncCopyMove) startDeleters() {
-	if s.deleteMode != DeleteModeDuring && s.deleteMode != DeleteModeOnly {
+	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
 		return
 	}
 	s.deletersWg.Add(1)
 	go func() {
 		defer s.deletersWg.Done()
-		err := deleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir)
+		err := operations.DeleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir)
 		s.processError(err)
 	}()
 }
 
 // This stops the background deleters
 func (s *syncCopyMove) stopDeleters() {
-	if s.deleteMode != DeleteModeDuring && s.deleteMode != DeleteModeOnly {
+	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
 		return
 	}
 	close(s.deleteFilesCh)
@@ -458,13 +405,13 @@ func (s *syncCopyMove) stopDeleters() {
 // checkSrcMap is clear then it assumes that any source files that
 // have been found have been removed from dstFiles already.
 func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
-	if Stats.Errored() {
-		Errorf(s.fdst, "%v", ErrorNotDeleting)
-		return ErrorNotDeleting
+	if accounting.Stats.Errored() {
+		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
+		return fs.ErrorNotDeleting
 	}
 
 	// Delete the spare files
-	toDelete := make(ObjectsChan, Config.Transfers)
+	toDelete := make(fs.ObjectsChan, fs.Config.Transfers)
 	go func() {
 		for remote, o := range s.dstFiles {
 			if checkSrcMap {
@@ -480,18 +427,18 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
 		}
 		close(toDelete)
 	}()
-	return deleteFilesWithBackupDir(toDelete, s.backupDir)
+	return operations.DeleteFilesWithBackupDir(toDelete, s.backupDir)
 }
 
 // This deletes the empty directories in the slice passed in.  It
 // ignores any errors deleting directories
-func deleteEmptyDirectories(f Fs, entries DirEntries) error {
+func deleteEmptyDirectories(f fs.Fs, entries fs.DirEntries) error {
 	if len(entries) == 0 {
 		return nil
 	}
-	if Stats.Errored() {
-		Errorf(f, "%v", ErrorNotDeletingDirs)
-		return ErrorNotDeletingDirs
+	if accounting.Stats.Errored() {
+		fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
+		return fs.ErrorNotDeletingDirs
 	}
 
 	// Now delete the empty directories starting from the longest path
@@ -500,25 +447,25 @@ func deleteEmptyDirectories(f Fs, entries DirEntries) error {
 	var okCount int
 	for i := len(entries) - 1; i >= 0; i-- {
 		entry := entries[i]
-		dir, ok := entry.(Directory)
+		dir, ok := entry.(fs.Directory)
 		if ok {
 			// TryRmdir only deletes empty directories
-			err := TryRmdir(f, dir.Remote())
+			err := operations.TryRmdir(f, dir.Remote())
 			if err != nil {
-				Debugf(logDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
+				fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
 				errorCount++
 			} else {
 				okCount++
 			}
 		} else {
-			Errorf(f, "Not a directory: %v", entry)
+			fs.Errorf(f, "Not a directory: %v", entry)
 		}
 	}
 	if errorCount > 0 {
-		Debugf(f, "failed to delete %d directories", errorCount)
+		fs.Debugf(f, "failed to delete %d directories", errorCount)
 	}
 	if okCount > 0 {
-		Debugf(f, "deleted %d directories", okCount)
+		fs.Debugf(f, "deleted %d directories", okCount)
 	}
 	return nil
 }
@@ -526,11 +473,11 @@ func deleteEmptyDirectories(f Fs, entries DirEntries) error {
 // renameHash makes a string with the size and the hash for rename detection
 //
 // it may return an empty string in which case no hash could be made
-func (s *syncCopyMove) renameHash(obj Object) (hash string) {
+func (s *syncCopyMove) renameHash(obj fs.Object) (hash string) {
 	var err error
 	hash, err = obj.Hash(s.commonHash)
 	if err != nil {
-		Debugf(obj, "Hash failed: %v", err)
+		fs.Debugf(obj, "Hash failed: %v", err)
 		return ""
 	}
 	if hash == "" {
@@ -540,7 +487,7 @@ func (s *syncCopyMove) renameHash(obj Object) (hash string) {
 }
 
 // pushRenameMap adds the object with hash to the rename map
-func (s *syncCopyMove) pushRenameMap(hash string, obj Object) {
+func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
 	s.renameMapMu.Lock()
 	s.renameMap[hash] = append(s.renameMap[hash], obj)
 	s.renameMapMu.Unlock()
@@ -548,7 +495,7 @@ func (s *syncCopyMove) pushRenameMap(hash string, obj Object) {
 
 // popRenameMap finds the object with hash and pops the first match from
 // renameMap or returns nil if not found.
-func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
+func (s *syncCopyMove) popRenameMap(hash string) (dst fs.Object) {
 	s.renameMapMu.Lock()
 	dsts, ok := s.renameMap[hash]
 	if ok && len(dsts) > 0 {
@@ -566,7 +513,7 @@ func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
 // makeRenameMap builds a map of the destination files by hash that
 // match sizes in the slice of objects in s.renameCheck
 func (s *syncCopyMove) makeRenameMap() {
-	Infof(s.fdst, "Making map for --track-renames")
+	fs.Infof(s.fdst, "Making map for --track-renames")
 
 	// first make a map of possible sizes we need to check
 	possibleSizes := map[int64]struct{}{}
@@ -575,38 +522,38 @@ func (s *syncCopyMove) makeRenameMap() {
 	}
 
 	// pump all the dstFiles into in
-	in := make(chan Object, Config.Checkers)
+	in := make(chan fs.Object, fs.Config.Checkers)
 	go s.pumpMapToChan(s.dstFiles, in)
 
 	// now make a map of size,hash for all dstFiles
-	s.renameMap = make(map[string][]Object)
+	s.renameMap = make(map[string][]fs.Object)
 	var wg sync.WaitGroup
-	wg.Add(Config.Transfers)
-	for i := 0; i < Config.Transfers; i++ {
+	wg.Add(fs.Config.Transfers)
+	for i := 0; i < fs.Config.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for obj := range in {
-				// only create hash for dst Object if its size could match
+				// only create hash for dst fs.Object if its size could match
 				if _, found := possibleSizes[obj.Size()]; found {
-					Stats.Checking(obj.Remote())
+					accounting.Stats.Checking(obj.Remote())
 					hash := s.renameHash(obj)
 					if hash != "" {
 						s.pushRenameMap(hash, obj)
 					}
-					Stats.DoneChecking(obj.Remote())
+					accounting.Stats.DoneChecking(obj.Remote())
 				}
 			}
 		}()
 	}
 	wg.Wait()
-	Infof(s.fdst, "Finished making map for --track-renames")
+	fs.Infof(s.fdst, "Finished making map for --track-renames")
 }
 
 // tryRename renames a src object when doing track renames if
 // possible. It returns true if the object was renamed.
-func (s *syncCopyMove) tryRename(src Object) bool {
-	Stats.Checking(src.Remote())
-	defer Stats.DoneChecking(src.Remote())
+func (s *syncCopyMove) tryRename(src fs.Object) bool {
+	accounting.Stats.Checking(src.Remote())
+	defer accounting.Stats.DoneChecking(src.Remote())
 
 	// Calculate the hash of the src object
 	hash := s.renameHash(src)
@@ -624,9 +571,9 @@ func (s *syncCopyMove) tryRename(src Object) bool {
 	dstOverwritten, _ := s.fdst.NewObject(src.Remote())
 
 	// Rename dst to have name src.Remote()
-	_, err := Move(s.fdst, dstOverwritten, src.Remote(), dst)
+	_, err := operations.Move(s.fdst, dstOverwritten, src.Remote(), dst)
 	if err != nil {
-		Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
+		fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
 		return false
 	}
 
@@ -635,7 +582,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
 	delete(s.dstFiles, dst.Remote())
 	s.dstFilesMu.Unlock()
 
-	Infof(src, "Renamed from %q", dst.Remote())
+	fs.Infof(src, "Renamed from %q", dst.Remote())
 	return true
 }
 
@@ -647,8 +594,8 @@ func (s *syncCopyMove) tryRename(src Object) bool {
 //
 // dir is the start directory, "" for root
 func (s *syncCopyMove) run() error {
-	if Same(s.fdst, s.fsrc) {
-		Errorf(s.fdst, "Nothing to do as source and destination are the same")
+	if operations.Same(s.fdst, s.fsrc) {
+		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
 		return nil
 	}
 
@@ -657,13 +604,13 @@ func (s *syncCopyMove) run() error {
 	s.startRenamers()
 	s.startTransfers()
 	s.startDeleters()
-	s.dstFiles = make(map[string]Object)
+	s.dstFiles = make(map[string]fs.Object)
 
 	s.startTrackRenames()
 
 	// set up a march over fdst and fsrc
-	m := newMarch(s.ctx, s.fdst, s.fsrc, s.dir, s)
-	m.run()
+	m := march.New(s.ctx, s.fdst, s.fsrc, s.dir, s)
+	m.Run()
 
 	s.stopTrackRenames()
 	if s.trackRenames {
@@ -671,7 +618,7 @@ func (s *syncCopyMove) run() error {
 		s.makeRenameMap()
 		// Attempt renames for all the files which don't have a matching dst
 		for _, src := range s.renameCheck {
-			s.toBeRenamed <- ObjectPair{src, nil}
+			s.toBeRenamed <- fs.ObjectPair{Src: src, Dst: nil}
 		}
 	}
 
@@ -682,18 +629,18 @@ func (s *syncCopyMove) run() error {
 	s.stopDeleters()
 
 	// Delete files after
-	if s.deleteMode == DeleteModeAfter {
+	if s.deleteMode == fs.DeleteModeAfter {
 		if s.currentError() != nil {
-			Errorf(s.fdst, "%v", ErrorNotDeleting)
+			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
 		} else {
 			s.processError(s.deleteFiles(false))
 		}
 	}
 
 	// Prune empty directories
-	if s.deleteMode != DeleteModeOff {
+	if s.deleteMode != fs.DeleteModeOff {
 		if s.currentError() != nil {
-			Errorf(s.fdst, "%v", ErrorNotDeletingDirs)
+			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
 		} else {
 			s.processError(deleteEmptyDirectories(s.fdst, s.dstEmptyDirs))
 		}
@@ -709,24 +656,24 @@ func (s *syncCopyMove) run() error {
 }
 
 // DstOnly handles an object which is in the destination only
-func (s *syncCopyMove) DstOnly(dst DirEntry) (recurse bool) {
-	if s.deleteMode == DeleteModeOff {
+func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
+	if s.deleteMode == fs.DeleteModeOff {
 		return false
 	}
 	switch x := dst.(type) {
-	case Object:
+	case fs.Object:
 		switch s.deleteMode {
-		case DeleteModeAfter:
+		case fs.DeleteModeAfter:
 			// record object as needs deleting
 			s.dstFilesMu.Lock()
 			s.dstFiles[x.Remote()] = x
 			s.dstFilesMu.Unlock()
-		case DeleteModeDuring, DeleteModeOnly:
+		case fs.DeleteModeDuring, fs.DeleteModeOnly:
 			s.deleteFilesCh <- x
 		default:
 			panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
 		}
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
 		// Record directory as it is potentially empty and needs deleting
 		if s.fdst.Features().CanHaveEmptyDirectories {
@@ -743,20 +690,20 @@ func (s *syncCopyMove) DstOnly(dst DirEntry) (recurse bool) {
 }
 
 // SrcOnly handles an object which is in the source only
-func (s *syncCopyMove) SrcOnly(src DirEntry) (recurse bool) {
-	if s.deleteMode == DeleteModeOnly {
+func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
+	if s.deleteMode == fs.DeleteModeOnly {
 		return false
 	}
 	switch x := src.(type) {
-	case Object:
+	case fs.Object:
 		if s.trackRenames {
 			// Save object to check for a rename later
 			s.trackRenamesCh <- x
 		} else {
 			// No need to check since doesn't exist
-			s.toBeUploaded <- ObjectPair{x, nil}
+			s.toBeUploaded <- fs.ObjectPair{Src: x, Dst: nil}
 		}
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
 		// Record the directory for deletion
 		s.srcEmptyDirsMu.Lock()
@@ -770,24 +717,24 @@ func (s *syncCopyMove) SrcOnly(src DirEntry) (recurse bool) {
 }
 
 // Match is called when src and dst are present, so sync src to dst
-func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
+func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
 	switch srcX := src.(type) {
-	case Object:
-		if s.deleteMode == DeleteModeOnly {
+	case fs.Object:
+		if s.deleteMode == fs.DeleteModeOnly {
 			return false
 		}
-		dstX, ok := dst.(Object)
+		dstX, ok := dst.(fs.Object)
 		if ok {
-			s.toBeChecked <- ObjectPair{srcX, dstX}
+			s.toBeChecked <- fs.ObjectPair{Src: srcX, Dst: dstX}
 		} else {
 			// FIXME src is file, dst is directory
 			err := errors.New("can't overwrite directory with file")
-			Errorf(dst, "%v", err)
+			fs.Errorf(dst, "%v", err)
 			s.processError(err)
 		}
-	case Directory:
+	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
-		_, ok := dst.(Directory)
+		_, ok := dst.(fs.Directory)
 		if ok {
 			// Record the src directory for deletion
 			s.srcEmptyDirsMu.Lock()
@@ -797,7 +744,7 @@ func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
 		}
 		// FIXME src is dir, dst is file
 		err := errors.New("can't overwrite file with directory")
-		Errorf(dst, "%v", err)
+		fs.Errorf(dst, "%v", err)
 		s.processError(err)
 	default:
 		panic("Bad object in DirEntries")
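
DstOnly, SrcOnly and Match above are the three callbacks the new march
package drives; syncCopyMove passes itself as the final argument to
march.New in run(). Sketched as an interface (the name Marcher is
hypothetical; the real definition lives in fs/march), the contract per
the signatures above is:

    // What march.New expects of its callback argument.
    type Marcher interface {
    	// DstOnly is called for entries found only in the destination.
    	DstOnly(dst fs.DirEntry) (recurse bool)
    	// SrcOnly is called for entries found only in the source.
    	SrcOnly(src fs.DirEntry) (recurse bool)
    	// Match is called when an entry is present on both sides.
    	Match(dst, src fs.DirEntry) (recurse bool)
    }
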
@@ -812,20 +759,17 @@ func (s *syncCopyMove) Match(dst, src DirEntry) (recurse bool) {
 // If DoMove is true then files will be moved instead of copied
 //
 // dir is the start directory, "" for root
-func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
-	if *oldSyncMethod {
-		return FatalError(errors.New("--old-sync-method is deprecated use --fast-list instead"))
-	}
-	if deleteMode != DeleteModeOff && DoMove {
-		return FatalError(errors.New("can't delete and move at the same time"))
+func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
+	if deleteMode != fs.DeleteModeOff && DoMove {
+		return fserrors.FatalError(errors.New("can't delete and move at the same time"))
 	}
 	// Run an extra pass to delete only
-	if deleteMode == DeleteModeBefore {
-		if Config.TrackRenames {
-			return FatalError(errors.New("can't use --delete-before with --track-renames"))
+	if deleteMode == fs.DeleteModeBefore {
+		if fs.Config.TrackRenames {
+			return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
 		}
 		// only delete stuff in this pass
-		do, err := newSyncCopyMove(fdst, fsrc, DeleteModeOnly, false, deleteEmptySrcDirs)
+		do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
 		if err != nil {
 			return err
 		}
@@ -834,7 +778,7 @@ func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
 			return err
 		}
 		// Next pass does a copy only
-		deleteMode = DeleteModeOff
+		deleteMode = fs.DeleteModeOff
 	}
 	do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
 	if err != nil {
@@ -844,52 +788,52 @@ func runSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool, deleteEm
 }
 
 // Sync fsrc into fdst
-func Sync(fdst, fsrc Fs) error {
-	return runSyncCopyMove(fdst, fsrc, Config.DeleteMode, false, false)
+func Sync(fdst, fsrc fs.Fs) error {
+	return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
 }
 
 // CopyDir copies fsrc into fdst
-func CopyDir(fdst, fsrc Fs) error {
-	return runSyncCopyMove(fdst, fsrc, DeleteModeOff, false, false)
+func CopyDir(fdst, fsrc fs.Fs) error {
+	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
 }
 
 // moveDir moves fsrc into fdst
-func moveDir(fdst, fsrc Fs, deleteEmptySrcDirs bool) error {
-	return runSyncCopyMove(fdst, fsrc, DeleteModeOff, true, deleteEmptySrcDirs)
+func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
+	return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
 }
 
 // MoveDir moves fsrc into fdst
-func MoveDir(fdst, fsrc Fs, deleteEmptySrcDirs bool) error {
-	if Same(fdst, fsrc) {
-		Errorf(fdst, "Nothing to do as source and destination are the same")
+func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
+	if operations.Same(fdst, fsrc) {
+		fs.Errorf(fdst, "Nothing to do as source and destination are the same")
 		return nil
 	}
 
 	// First attempt to use DirMover if it exists, the Fs is the same and no filters are active
-	if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && SameConfig(fsrc, fdst) && Config.Filter.InActive() {
-		if Config.DryRun {
-			Logf(fdst, "Not doing server side directory move as --dry-run")
+	if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && filter.Active.InActive() {
+		if fs.Config.DryRun {
+			fs.Logf(fdst, "Not doing server side directory move as --dry-run")
 			return nil
 		}
-		Debugf(fdst, "Using server side directory move")
+		fs.Debugf(fdst, "Using server side directory move")
 		err := fdstDirMove(fsrc, "", "")
 		switch err {
-		case ErrorCantDirMove, ErrorDirExists:
-			Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
+		case fs.ErrorCantDirMove, fs.ErrorDirExists:
+			fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
 		case nil:
-			Infof(fdst, "Server side directory move succeeded")
+			fs.Infof(fdst, "Server side directory move succeeded")
 			return nil
 		default:
-			Stats.Error(err)
-			Errorf(fdst, "Server side directory move failed: %v", err)
+			fs.CountError(err)
+			fs.Errorf(fdst, "Server side directory move failed: %v", err)
 			return err
 		}
 	}
 
 	// The two remotes mustn't overlap if we didn't do a server side move
-	if Overlapping(fdst, fsrc) {
-		err := ErrorCantMoveOverlapping
-		Errorf(fdst, "%v", err)
+	if operations.Overlapping(fdst, fsrc) {
+		err := fs.ErrorCantMoveOverlapping
+		fs.Errorf(fdst, "%v", err)
 		return err
 	}
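
For callers outside this file, the effect of the move is that the
top-level entry points now live in the fs/sync package instead of on fs
itself. A minimal sketch of a migrated call site, assuming the new
import path github.com/ncw/rclone/fs/sync and a hypothetical remote
name (a real caller would also load the config first, as fstest now
does via config.LoadConfig):

    package main

    import (
    	"log"

    	_ "github.com/ncw/rclone/backend/all" // register all backends
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fs/sync"
    )

    func main() {
    	// "remote:dst" and "/tmp/src" are placeholders for any
    	// configured remote or local path.
    	fdst, err := fs.NewFs("remote:dst")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fsrc, err := fs.NewFs("/tmp/src")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Previously fs.Sync(fdst, fsrc); now:
    	if err := sync.Sync(fdst, fsrc); err != nil {
    		log.Fatal(err)
    	}
    }
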
 
diff --git a/fs/sync_test.go b/fs/sync/sync_test.go
similarity index 83%
rename from fs/sync_test.go
rename to fs/sync/sync_test.go
index 710831ba5..354fd9453 100644
--- a/fs/sync_test.go
+++ b/fs/sync/sync_test.go
@@ -1,19 +1,36 @@
 // Test sync/copy/move
 
-package fs_test
+package sync
 
 import (
 	"runtime"
 	"testing"
 	"time"
 
+	_ "github.com/ncw/rclone/backend/all" // import all backends
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/ncw/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/text/unicode/norm"
 )
 
+// Some times used in the tests
+var (
+	t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
+	t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
+	t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
+)
+
+// TestMain drives the tests
+func TestMain(m *testing.M) {
+	fstest.TestMain(m)
+}
+
 // Check dry run is working
 func TestCopyWithDryRun(t *testing.T) {
 	r := fstest.NewRun(t)
@@ -22,7 +39,7 @@ func TestCopyWithDryRun(t *testing.T) {
 	r.Mkdir(r.Fremote)
 
 	fs.Config.DryRun = true
-	err := fs.CopyDir(r.Fremote, r.Flocal)
+	err := CopyDir(r.Fremote, r.Flocal)
 	fs.Config.DryRun = false
 	require.NoError(t, err)
 
@@ -37,7 +54,7 @@ func TestCopy(t *testing.T) {
 	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
 	r.Mkdir(r.Fremote)
 
-	err := fs.CopyDir(r.Fremote, r.Flocal)
+	err := CopyDir(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -54,7 +71,7 @@ func TestCopyNoTraverse(t *testing.T) {
 
 	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
 
-	err := fs.CopyDir(r.Fremote, r.Flocal)
+	err := CopyDir(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -71,8 +88,8 @@ func TestSyncNoTraverse(t *testing.T) {
 
 	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -90,7 +107,7 @@ func TestCopyWithDepth(t *testing.T) {
 	fs.Config.MaxDepth = 1
 	defer func() { fs.Config.MaxDepth = -1 }()
 
-	err := fs.CopyDir(r.Fremote, r.Flocal)
+	err := CopyDir(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -109,7 +126,7 @@ func TestServerSideCopy(t *testing.T) {
 	defer finaliseCopy()
 	t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
 
-	err = fs.CopyDir(FremoteCopy, r.Fremote)
+	err = CopyDir(FremoteCopy, r.Fremote)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, FremoteCopy, file1)
@@ -124,10 +141,10 @@ func TestCopyAfterDelete(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal)
 	fstest.CheckItems(t, r.Fremote, file1)
 
-	err := fs.Mkdir(r.Flocal, "")
+	err := operations.Mkdir(r.Flocal, "")
 	require.NoError(t, err)
 
-	err = fs.CopyDir(r.Fremote, r.Flocal)
+	err = CopyDir(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal)
@@ -141,7 +158,7 @@ func TestCopyRedownload(t *testing.T) {
 	file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
 	fstest.CheckItems(t, r.Fremote, file1)
 
-	err := fs.CopyDir(r.Flocal, r.Fremote)
+	err := CopyDir(r.Flocal, r.Fremote)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -159,24 +176,24 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
 	file1 := r.WriteFile("check sum", "", t1)
 	fstest.CheckItems(t, r.Flocal, file1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly one file.
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Change last modified date only
 	file2 := r.WriteFile("check sum", "", t2)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred no files
-	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file1)
 }
@@ -193,24 +210,24 @@ func TestSyncSizeOnly(t *testing.T) {
 	file1 := r.WriteFile("sizeonly", "potato", t1)
 	fstest.CheckItems(t, r.Flocal, file1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly one file.
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Update mtime, md5sum but not length of file
 	file2 := r.WriteFile("sizeonly", "POTATO", t2)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred no files
-	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file1)
 }
@@ -227,24 +244,24 @@ func TestSyncIgnoreSize(t *testing.T) {
 	file1 := r.WriteFile("ignore-size", "contents", t1)
 	fstest.CheckItems(t, r.Flocal, file1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly one file.
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Update size but not date of file
 	file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred no files
-	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file1)
 }
@@ -255,24 +272,24 @@ func TestSyncIgnoreTimes(t *testing.T) {
 	file1 := r.WriteBoth("existing", "potato", t1)
 	fstest.CheckItems(t, r.Fremote, file1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly 0 files because the
 	// files were identical.
-	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(0), accounting.Stats.GetTransfers())
 
 	fs.Config.IgnoreTimes = true
 	defer func() { fs.Config.IgnoreTimes = false }()
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly one file even though the
 	// files were identical.
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -286,16 +303,16 @@ func TestSyncIgnoreExisting(t *testing.T) {
 	fs.Config.IgnoreExisting = true
 	defer func() { fs.Config.IgnoreExisting = false }()
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Change everything
 	r.WriteFile("existing", "newpotatoes", t2)
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	// Items should not change
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -313,8 +330,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 	fs.Config.DryRun = true
 	defer func() { fs.Config.DryRun = false }()
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -322,8 +339,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 
 	fs.Config.DryRun = false
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -350,8 +367,8 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
@@ -371,15 +388,15 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// We should have transferred exactly one file, not set the mod time
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 }
 
 func TestSyncAfterAddingAFile(t *testing.T) {
@@ -391,8 +408,8 @@ func TestSyncAfterAddingAFile(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal, file1, file2)
 	fstest.CheckItems(t, r.Fremote, file1)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1, file2)
 	fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -406,8 +423,8 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file2)
@@ -429,8 +446,8 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file2)
@@ -445,8 +462,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
 	file3 := r.WriteBoth("empty space", "", t2)
 
 	fs.Config.DryRun = true
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	fs.Config.DryRun = false
 	require.NoError(t, err)
 
@@ -464,8 +481,8 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file2, file3)
 	fstest.CheckItems(t, r.Flocal, file1, file3)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1, file3)
 	fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -478,8 +495,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
 	file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
 	file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2)
 	file3 := r.WriteBoth("c/non empty space", "AhHa!", t2)
-	require.NoError(t, fs.Mkdir(r.Fremote, "d"))
-	require.NoError(t, fs.Mkdir(r.Fremote, "d/e"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "d"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "d/e"))
 
 	fstest.CheckListingWithPrecision(
 		t,
@@ -510,8 +527,8 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
 		fs.Config.ModifyWindow,
 	)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckListingWithPrecision(
@@ -549,7 +566,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 	file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
 	file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2)
 	file3 := r.WriteBoth("c/non empty space", "AhHa!", t2)
-	require.NoError(t, fs.Mkdir(r.Fremote, "d"))
+	require.NoError(t, operations.Mkdir(r.Fremote, "d"))
 
 	fstest.CheckListingWithPrecision(
 		t,
@@ -579,9 +596,9 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 		fs.Config.ModifyWindow,
 	)
 
-	fs.Stats.ResetCounters()
-	fs.Stats.Error(nil)
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	fs.CountError(nil)
+	err := Sync(r.Fremote, r.Flocal)
 	assert.Equal(t, fs.ErrorNotDeleting, err)
 
 	fstest.CheckListingWithPrecision(
@@ -657,8 +674,8 @@ func TestCopyDeleteBefore(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 	fstest.CheckItems(t, r.Flocal, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.CopyDir(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := CopyDir(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -675,20 +692,20 @@ func TestSyncWithExclude(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 	fstest.CheckItems(t, r.Flocal, file1, file2, file3)
 
-	fs.Config.Filter.MaxSize = 40
+	filter.Active.Opt.MaxSize = 40
 	defer func() {
-		fs.Config.Filter.MaxSize = -1
+		filter.Active.Opt.MaxSize = -1
 	}()
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Fremote, file2, file1)
 
 	// Now sync the other way round and check enormous doesn't get
 	// deleted as it is excluded from the sync
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Flocal, r.Fremote)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Flocal, r.Fremote)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file2, file1, file3)
 }
@@ -703,22 +720,22 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1, file2, file3)
 	fstest.CheckItems(t, r.Flocal, file1, file2, file3)
 
-	fs.Config.Filter.MaxSize = 40
-	fs.Config.Filter.DeleteExcluded = true
+	filter.Active.Opt.MaxSize = 40
+	filter.Active.Opt.DeleteExcluded = true
 	defer func() {
-		fs.Config.Filter.MaxSize = -1
-		fs.Config.Filter.DeleteExcluded = false
+		filter.Active.Opt.MaxSize = -1
+		filter.Active.Opt.DeleteExcluded = false
 	}()
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Fremote, file2)
 
 	// Check sync the other way round to make sure enormous gets
 	// deleted even though it is excluded
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Flocal, r.Fremote)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Flocal, r.Fremote)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file2)
 }
@@ -752,8 +769,8 @@ func TestSyncWithUpdateOlder(t *testing.T) {
 		fs.Config.ModifyWindow = oldModifyWindow
 	}()
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
 }
@@ -769,15 +786,15 @@ func TestSyncWithTrackRenames(t *testing.T) {
 
 	}()
 
-	haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != fs.HashNone
-	canTrackRenames := haveHash && fs.CanServerSideMove(r.Fremote)
+	haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.HashNone
+	canTrackRenames := haveHash && operations.CanServerSideMove(r.Fremote)
 	t.Logf("Can track renames: %v", canTrackRenames)
 
 	f1 := r.WriteFile("potato", "Potato Content", t1)
 	f2 := r.WriteFile("yam", "Yam Content", t2)
 
-	fs.Stats.ResetCounters()
-	require.NoError(t, fs.Sync(r.Fremote, r.Flocal))
+	accounting.Stats.ResetCounters()
+	require.NoError(t, Sync(r.Fremote, r.Flocal))
 
 	fstest.CheckItems(t, r.Fremote, f1, f2)
 	fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -785,15 +802,15 @@ func TestSyncWithTrackRenames(t *testing.T) {
 	// Now rename locally.
 	f2 = r.RenameFile(f2, "yaml")
 
-	fs.Stats.ResetCounters()
-	require.NoError(t, fs.Sync(r.Fremote, r.Flocal))
+	accounting.Stats.ResetCounters()
+	require.NoError(t, Sync(r.Fremote, r.Flocal))
 
 	fstest.CheckItems(t, r.Fremote, f1, f2)
 
 	if canTrackRenames {
-		assert.Equal(t, fs.Stats.GetTransfers(), int64(0))
+		assert.Equal(t, accounting.Stats.GetTransfers(), int64(0))
 	} else {
-		assert.Equal(t, fs.Stats.GetTransfers(), int64(1))
+		assert.Equal(t, accounting.Stats.GetTransfers(), int64(1))
 	}
 }
 
@@ -808,7 +825,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
 	file3u := r.WriteBoth("potato3", "------------------------------------------------------------ UPDATED", t2)
 
 	if testDeleteEmptyDirs {
-		err := fs.Mkdir(r.Fremote, "tomatoDir")
+		err := operations.Mkdir(r.Fremote, "tomatoDir")
 		require.NoError(t, err)
 	}
 
@@ -822,8 +839,8 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
 	fstest.CheckItems(t, FremoteMove, file2, file3)
 
 	// Do server side move
-	fs.Stats.ResetCounters()
-	err = fs.MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
+	accounting.Stats.ResetCounters()
+	err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
 	require.NoError(t, err)
 
 	if withFilter {
@@ -844,13 +861,13 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
 	defer finaliseMove2()
 
 	if testDeleteEmptyDirs {
-		err := fs.Mkdir(FremoteMove, "tomatoDir")
+		err := operations.Mkdir(FremoteMove, "tomatoDir")
 		require.NoError(t, err)
 	}
 
 	// Move it back to a new empty remote, dst does not exist this time
-	fs.Stats.ResetCounters()
-	err = fs.MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
+	accounting.Stats.ResetCounters()
+	err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
 	require.NoError(t, err)
 
 	if withFilter {
@@ -878,9 +895,9 @@ func TestServerSideMoveWithFilter(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 
-	fs.Config.Filter.MinSize = 40
+	filter.Active.Opt.MinSize = 40
 	defer func() {
-		fs.Config.Filter.MinSize = -1
+		filter.Active.Opt.MinSize = -1
 	}()
 
 	testServerSideMove(t, r, true, false)
@@ -910,15 +927,15 @@ func TestServerSideMoveOverlap(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Subdir move with no filters should return ErrorCantMoveOverlapping
-	err = fs.MoveDir(FremoteMove, r.Fremote, false)
+	err = MoveDir(FremoteMove, r.Fremote, false)
 	assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
 
 	// Now try with a filter which should also fail with ErrorCantMoveOverlapping
-	fs.Config.Filter.MinSize = 40
+	filter.Active.Opt.MinSize = 40
 	defer func() {
-		fs.Config.Filter.MinSize = -1
+		filter.Active.Opt.MinSize = -1
 	}()
-	err = fs.MoveDir(FremoteMove, r.Fremote, false)
+	err = MoveDir(FremoteMove, r.Fremote, false)
 	assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
 }
 
@@ -927,7 +944,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 
-	if !fs.CanServerSideMove(r.Fremote) {
+	if !operations.CanServerSideMove(r.Fremote) {
 		t.Skip("Skipping test as remote does not support server side move")
 	}
 	r.Mkdir(r.Fremote)
@@ -953,8 +970,8 @@ func testSyncBackupDir(t *testing.T, suffix string) {
 	fdst, err := fs.NewFs(r.FremoteName + "/dst")
 	require.NoError(t, err)
 
-	fs.Stats.ResetCounters()
-	err = fs.Sync(fdst, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(fdst, r.Flocal)
 	require.NoError(t, err)
 
 	// one should be moved to the backup dir and the new one installed
@@ -974,8 +991,8 @@ func testSyncBackupDir(t *testing.T, suffix string) {
 
 	// This should delete three and overwrite one again, checking
 	// the files got overwritten correctly in backup-dir
-	fs.Stats.ResetCounters()
-	err = fs.Sync(fdst, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(fdst, r.Flocal)
 	require.NoError(t, err)
 
 	// one should be moved to the backup dir and the new one installed
@@ -1011,13 +1028,13 @@ func TestSyncUTFNorm(t *testing.T) {
 	file2 := r.WriteObject(Encoding2, "This is a old test", t2)
 	fstest.CheckItems(t, r.Fremote, file2)
 
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 
 	// We should have transferred exactly one file, but kept the
 	// normalized state of the file.
-	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
+	assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
 	fstest.CheckItems(t, r.Flocal, file1)
 	file1.Path = file2.Path
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -1037,8 +1054,8 @@ func TestSyncImmutable(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote)
 
 	// Should succeed
-	fs.Stats.ResetCounters()
-	err := fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err := Sync(r.Fremote, r.Flocal)
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -1049,8 +1066,8 @@ func TestSyncImmutable(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1)
 
 	// Should fail with ErrorImmutableModified and not modify local or remote files
-	fs.Stats.ResetCounters()
-	err = fs.Sync(r.Fremote, r.Flocal)
+	accounting.Stats.ResetCounters()
+	err = Sync(r.Fremote, r.Flocal)
 	assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
 	fstest.CheckItems(t, r.Flocal, file2)
 	fstest.CheckItems(t, r.Fremote, file1)
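
The mechanical change running through all of these tests is the same:
the global statistics moved from fs.Stats to accounting.Stats, and the
entry points are called unqualified now that the tests live in package
sync. The recurring assertion pattern, as used above (r is the
fstest.Run each test creates):

    accounting.Stats.ResetCounters()
    err := Sync(r.Fremote, r.Flocal)
    require.NoError(t, err)
    // Exactly one file should have been transferred.
    assert.Equal(t, int64(1), accounting.Stats.GetTransfers())
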
diff --git a/fs/walk.go b/fs/walk/walk.go
similarity index 78%
rename from fs/walk.go
rename to fs/walk/walk.go
index dc4e092a0..2b4a46f0b 100644
--- a/fs/walk.go
+++ b/fs/walk/walk.go
@@ -1,6 +1,5 @@
-// Walking directories
-
-package fs
+// Package walk walks directories
+package walk
 
 import (
 	"bytes"
@@ -11,6 +10,9 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fs/list"
 	"github.com/pkg/errors"
 )
 
@@ -23,7 +25,7 @@ var ErrorSkipDir = errors.New("skip this directory")
 // capable of doing a recursive listing.
 var ErrorCantListR = errors.New("recursive directory listing not available")
 
-// WalkFunc is the type of the function called for directory
+// Func is the type of the function called for each directory
 // visited by Walk. The path argument contains the remote path to the directory.
 //
 // If there was a problem walking to the directory named by path, the
@@ -33,7 +35,7 @@ var ErrorCantListR = errors.New("recursive directory listing not available")
 // sole exception is when the function returns the special value
 // ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
 // directory's contents entirely.
-type WalkFunc func(path string, entries DirEntries, err error) error
+type Func func(path string, entries fs.DirEntries, err error) error
 
 // Walk lists the directory.
 //
@@ -53,25 +55,25 @@ type WalkFunc func(path string, entries DirEntries, err error) error
 // and f supports it and level > 1, or WalkN otherwise.
 //
 // NB (f, path) to be replaced by fs.Dir at some point
-func Walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
-	if (maxLevel < 0 || maxLevel > 1) && Config.UseListR && f.Features().ListR != nil {
-		return WalkR(f, path, includeAll, maxLevel, fn)
+func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+	if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
+		return walkListR(f, path, includeAll, maxLevel, fn)
 	}
-	return WalkN(f, path, includeAll, maxLevel, fn)
+	return walkListDirSorted(f, path, includeAll, maxLevel, fn)
 }
 
-// WalkN lists the directory.
+// walkListDirSorted lists the directory.
 //
 // It implements Walk using non-recursive directory listing.
-func WalkN(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
-	return walk(f, path, includeAll, maxLevel, fn, ListDirSorted)
+func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+	return walk(f, path, includeAll, maxLevel, fn, list.DirSorted)
 }
 
-// WalkR lists the directory.
+// walkListR lists the directory.
 //
 // It implements Walk using recursive directory listing if
 // available, or returns ErrorCantListR if not.
-func WalkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error {
+func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
 	listR := f.Features().ListR
 	if listR == nil {
 		return ErrorCantListR
@@ -79,9 +81,9 @@ func WalkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc) error
 	return walkR(f, path, includeAll, maxLevel, fn, listR)
 }
 
-type listDirFunc func(fs Fs, includeAll bool, dir string) (entries DirEntries, err error)
+type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
 
-func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir listDirFunc) error {
+func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
 	var (
 		wg         sync.WaitGroup // sync closing of goroutines
 		traversing sync.WaitGroup // running directory traversals
@@ -94,7 +96,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
 		depth  int
 	}
 
-	in := make(chan listJob, Config.Checkers)
+	in := make(chan listJob, fs.Config.Checkers)
 	errs := make(chan error, 1)
 	quit := make(chan struct{})
 	closeQuit := func() {
@@ -107,7 +109,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
 			}()
 		})
 	}
-	for i := 0; i < Config.Checkers; i++ {
+	for i := 0; i < fs.Config.Checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -120,7 +122,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
 					entries, err := listDir(f, includeAll, job.remote)
 					var jobs []listJob
 					if err == nil && job.depth != 0 {
-						entries.ForDir(func(dir Directory) {
+						entries.ForDir(func(dir fs.Directory) {
 							// Recurse for the directory
 							jobs = append(jobs, listJob{
 								remote: dir.Remote(),
@@ -134,8 +136,8 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
 					// NB once we have passed entries to fn we mustn't touch it again
 					if err != nil && err != ErrorSkipDir {
 						traversing.Done()
-						Stats.Error(err)
-						Errorf(job.remote, "error listing: %v", err)
+						fs.CountError(err)
+						fs.Errorf(job.remote, "error listing: %v", err)
 						closeQuit()
 						// Send error to error channel if space
 						select {
@@ -176,7 +178,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
 }
 
 // DirTree is a map of directories to entries
-type DirTree map[string]DirEntries
+type DirTree map[string]fs.DirEntries
 
 // parentDir finds the parent directory of path
 func parentDir(entryPath string) string {
@@ -188,13 +190,13 @@ func parentDir(entryPath string) string {
 }
 
 // add an entry to the tree
-func (dt DirTree) add(entry DirEntry) {
+func (dt DirTree) add(entry fs.DirEntry) {
 	dirPath := parentDir(entry.Remote())
 	dt[dirPath] = append(dt[dirPath], entry)
 }
 
 // add a directory entry to the tree
-func (dt DirTree) addDir(entry DirEntry) {
+func (dt DirTree) addDir(entry fs.DirEntry) {
 	dt.add(entry)
 	// create the directory itself if it doesn't exist already
 	dirPath := entry.Remote()
@@ -204,7 +206,7 @@ func (dt DirTree) addDir(entry DirEntry) {
 }
 
 // Find returns the DirEntry for filePath or nil if not found
-func (dt DirTree) Find(filePath string) (parentPath string, entry DirEntry) {
+func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
 	parentPath = parentDir(filePath)
 	for _, entry := range dt[parentPath] {
 		if entry.Remote() == filePath {
@@ -223,7 +225,7 @@ func (dt DirTree) checkParent(root, dirPath string) {
 	if entry != nil {
 		return
 	}
-	dt[parentPath] = append(dt[parentPath], NewDir(dirPath, time.Now()))
+	dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
 	dt.checkParent(root, parentPath)
 }
 
@@ -264,7 +266,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
 			// true, therefore this should not
 			// happen. But this makes the function
 			// more predictable.
-			Infof(dName, "Directory in the map for prune, but the value is false")
+			fs.Infof(dName, "Directory in the map for prune, but the value is false")
 			continue
 		}
 		if dName == "" {
@@ -277,7 +279,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
 		// such case the loop will be skipped.
 		for i, entry := range dt[parent] {
 			switch x := entry.(type) {
-			case Directory:
+			case fs.Directory:
 				if x.Remote() == dName {
 					// the slice is not sorted yet
 					// to delete item
@@ -289,7 +291,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
 					// iterating immediately
 					break
 				}
-			case Object:
+			case fs.Object:
 				// do nothing
 			default:
 				return errors.Errorf("unknown object type %T", entry)
@@ -303,7 +305,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
 		// during range iteration, they may be skipped.
 		for dName, remove := range dirNames {
 			if !remove {
-				Infof(dName, "Directory in the map for prune, but the value is false")
+				fs.Infof(dName, "Directory in the map for prune, but the value is false")
 				continue
 			}
 			// First, add all subdirectories to dirNames.
@@ -312,10 +314,10 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
 			// If so, the loop will be skipped.
 			for _, entry := range dt[dName] {
 				switch x := entry.(type) {
-				case Directory:
+				case fs.Directory:
 					excludeDir := x.Remote()
 					dirNames[excludeDir] = true
-				case Object:
+				case fs.Object:
 					// do nothing
 				default:
 					return errors.Errorf("unknown object type %T", entry)
@@ -338,7 +340,7 @@ func (dt DirTree) String() string {
 		fmt.Fprintf(out, "%s/\n", dir)
 		for _, entry := range dt[dir] {
 			flag := ""
-			if _, ok := entry.(Directory); ok {
+			if _, ok := entry.(fs.Directory); ok {
 				flag = "/"
 			}
 			fmt.Fprintf(out, "  %s%s\n", path.Base(entry.Remote()), flag)
@@ -347,22 +349,22 @@ func (dt DirTree) String() string {
 	return out.String()
 }
 
-func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR ListRFn) (DirTree, error) {
+func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) {
 	dirs := make(DirTree)
 	// Entries can come in arbitrary order. We use toPrune to keep
 	// all directories to exclude later.
 	toPrune := make(map[string]bool)
-	includeDirectory := Config.Filter.IncludeDirectory(f)
+	includeDirectory := filter.Active.IncludeDirectory(f)
 	var mu sync.Mutex
-	err := listR(startPath, func(entries DirEntries) error {
+	err := listR(startPath, func(entries fs.DirEntries) error {
 		mu.Lock()
 		defer mu.Unlock()
 		for _, entry := range entries {
 			slashes := strings.Count(entry.Remote(), "/")
 			switch x := entry.(type) {
-			case Object:
+			case fs.Object:
 				// Make sure we don't delete excluded files if not required
-				if includeAll || Config.Filter.IncludeObject(x) {
+				if includeAll || filter.Active.IncludeObject(x) {
 					if maxLevel < 0 || slashes <= maxLevel-1 {
 						dirs.add(x)
 					} else {
@@ -374,18 +376,18 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
 						dirs.checkParent(startPath, dirPath)
 					}
 				} else {
-					Debugf(x, "Excluded from sync (and deletion)")
+					fs.Debugf(x, "Excluded from sync (and deletion)")
 				}
 				// Check if we need to prune a directory later.
-				if !includeAll && len(Config.Filter.ExcludeFile) > 0 {
+				if !includeAll && len(filter.Active.Opt.ExcludeFile) > 0 {
 					basename := path.Base(x.Remote())
-					if basename == Config.Filter.ExcludeFile {
+					if basename == filter.Active.Opt.ExcludeFile {
 						excludeDir := parentDir(x.Remote())
 						toPrune[excludeDir] = true
-						Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
+						fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
 					}
 				}
-			case Directory:
+			case fs.Directory:
 				inc, err := includeDirectory(x.Remote())
 				if err != nil {
 					return err
@@ -400,7 +402,7 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
 						}
 					}
 				} else {
-					Debugf(x, "Excluded from sync (and deletion)")
+					fs.Debugf(x, "Excluded from sync (and deletion)")
 				}
 			default:
 				return errors.Errorf("unknown object type %T", entry)
@@ -424,9 +426,9 @@ func walkRDirTree(f Fs, startPath string, includeAll bool, maxLevel int, listR L
 }
 
 // Create a DirTree using List
-func walkNDirTree(f Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
+func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
 	dirs := make(DirTree)
-	fn := func(dirPath string, entries DirEntries, err error) error {
+	fn := func(dirPath string, entries fs.DirEntries, err error) error {
 		if err == nil {
 			dirs[dirPath] = entries
 		}
@@ -451,21 +453,21 @@ func walkNDirTree(f Fs, path string, includeAll bool, maxLevel int, listDir list
 // and f supports it and level > 1, or WalkN otherwise.
 //
 // NB (f, path) to be replaced by fs.Dir at some point
-func NewDirTree(f Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
-	if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && Config.UseListR && ListR != nil {
+func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
+	if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
 		return walkRDirTree(f, path, includeAll, maxLevel, ListR)
 	}
-	return walkNDirTree(f, path, includeAll, maxLevel, ListDirSorted)
+	return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted)
 }
 
-func walkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listR ListRFn) error {
+func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
 	dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR)
 	if err != nil {
 		return err
 	}
 	skipping := false
 	skipPrefix := ""
-	emptyDir := DirEntries{}
+	emptyDir := fs.DirEntries{}
 	for _, dirPath := range dirs.Dirs() {
 		if skipping {
 			// Skip over directories as required
@@ -492,17 +494,17 @@ func walkR(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listR
 	return nil
 }
 
-// WalkGetAll runs Walk getting all the results
-func WalkGetAll(f Fs, path string, includeAll bool, maxLevel int) (objs []Object, dirs []Directory, err error) {
-	err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries DirEntries, err error) error {
+// GetAll runs Walk getting all the results
+func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
+	err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
 			return err
 		}
 		for _, entry := range entries {
 			switch x := entry.(type) {
-			case Object:
+			case fs.Object:
 				objs = append(objs, x)
-			case Directory:
+			case fs.Directory:
 				dirs = append(dirs, x)
 			}
 		}
@@ -513,12 +515,12 @@ func WalkGetAll(f Fs, path string, includeAll bool, maxLevel int) (objs []Object
 
 // ListRHelper is used in the implementation of ListR to accumulate DirEntries
 type ListRHelper struct {
-	callback ListRCallback
-	entries  DirEntries
+	callback fs.ListRCallback
+	entries  fs.DirEntries
 }
 
 // NewListRHelper should be called from ListR with the callback passed in
-func NewListRHelper(callback ListRCallback) *ListRHelper {
+func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
 	return &ListRHelper{
 		callback: callback,
 	}
@@ -536,7 +538,7 @@ func (lh *ListRHelper) send(max int) (err error) {
 
 // Add an entry to the stored entries and send them if there are more
 // than a certain amount
-func (lh *ListRHelper) Add(entry DirEntry) error {
+func (lh *ListRHelper) Add(entry fs.DirEntry) error {
 	if entry == nil {
 		return nil
 	}
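
Taken together, the renames leave walk.Walk, walk.Func, walk.GetAll and
the DirTree helpers as the exported surface, with the WalkN/WalkR
variants unexported. A minimal sketch of an external caller, assuming f
is an fs.Fs that has already been created:

    // Visit every directory under the root with no depth limit.
    err := walk.Walk(f, "", false, -1, func(path string, entries fs.DirEntries, err error) error {
    	if err != nil {
    		return err // propagate listing errors and stop the walk
    	}
    	for _, entry := range entries {
    		fs.Infof(entry, "visited")
    	}
    	if path == "dir-to-skip" { // hypothetical directory name
    		return walk.ErrorSkipDir // skip this directory's contents
    	}
    	return nil
    })
    if err != nil {
    	fs.Errorf(nil, "walk failed: %v", err)
    }

walk.GetAll collects the same traversal into []fs.Object and
[]fs.Directory slices in one call, replacing the old fs.WalkGetAll.
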
diff --git a/fs/walk_test.go b/fs/walk/walk_test.go
similarity index 67%
rename from fs/walk_test.go
rename to fs/walk/walk_test.go
index 362b31901..91ca8fb2f 100644
--- a/fs/walk_test.go
+++ b/fs/walk/walk_test.go
@@ -1,12 +1,14 @@
-package fs
+package walk
 
 import (
 	"fmt"
-	"io"
 	"sync"
 	"testing"
-	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/filter"
+	"github.com/ncw/rclone/fstest/mockdir"
+	"github.com/ncw/rclone/fstest/mockobject"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -14,7 +16,7 @@ import (
 
 type (
 	listResult struct {
-		entries DirEntries
+		entries fs.DirEntries
 		err     error
 	}
 
@@ -25,7 +27,7 @@ type (
 	listDirs struct {
 		mu          sync.Mutex
 		t           *testing.T
-		fs          Fs
+		fs          fs.Fs
 		includeAll  bool
 		results     listResults
 		walkResults listResults
@@ -36,32 +38,7 @@ type (
 	}
 )
 
-var errNotImpl = errors.New("not implemented")
-
-type mockObject string
-
-func (o mockObject) String() string                                    { return string(o) }
-func (o mockObject) Fs() Info                                          { return nil }
-func (o mockObject) Remote() string                                    { return string(o) }
-func (o mockObject) Hash(HashType) (string, error)                     { return "", errNotImpl }
-func (o mockObject) ModTime() (t time.Time)                            { return t }
-func (o mockObject) Size() int64                                       { return 0 }
-func (o mockObject) Storable() bool                                    { return true }
-func (o mockObject) SetModTime(time.Time) error                        { return errNotImpl }
-func (o mockObject) Open(options ...OpenOption) (io.ReadCloser, error) { return nil, errNotImpl }
-func (o mockObject) Update(in io.Reader, src ObjectInfo, options ...OpenOption) error {
-	return errNotImpl
-}
-func (o mockObject) Remove() error { return errNotImpl }
-
-type unknownDirEntry string
-
-func (o unknownDirEntry) String() string         { return string(o) }
-func (o unknownDirEntry) Remote() string         { return string(o) }
-func (o unknownDirEntry) ModTime() (t time.Time) { return t }
-func (o unknownDirEntry) Size() int64            { return 0 }
-
-func newListDirs(t *testing.T, f Fs, includeAll bool, results listResults, walkErrors errorMap, finalError error) *listDirs {
+func newListDirs(t *testing.T, f fs.Fs, includeAll bool, results listResults, walkErrors errorMap, finalError error) *listDirs {
 	return &listDirs{
 		t:           t,
 		fs:          f,
@@ -88,7 +65,7 @@ func (ls *listDirs) SetLevel(maxLevel int) *listDirs {
 }
 
 // ListDir returns the expected listing for the directory
-func (ls *listDirs) ListDir(f Fs, includeAll bool, dir string) (entries DirEntries, err error) {
+func (ls *listDirs) ListDir(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
 	ls.mu.Lock()
 	defer ls.mu.Unlock()
 	assert.Equal(ls.t, ls.fs, f)
@@ -109,7 +86,7 @@ func (ls *listDirs) ListDir(f Fs, includeAll bool, dir string) (entries DirEntri
 }
 
 // ListR returns the expected listing for the directory using ListR
-func (ls *listDirs) ListR(dir string, callback ListRCallback) (err error) {
+func (ls *listDirs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	ls.mu.Lock()
 	defer ls.mu.Unlock()
 
@@ -140,7 +117,7 @@ func (ls *listDirs) IsFinished() {
 }
 
 // WalkFn is called by the walk to test the expectations
-func (ls *listDirs) WalkFn(dir string, entries DirEntries, err error) error {
+func (ls *listDirs) WalkFn(dir string, entries fs.DirEntries, err error) error {
 	ls.mu.Lock()
 	defer ls.mu.Unlock()
 	// ls.t.Logf("WalkFn(%q, %v, %q)", dir, entries, err)
@@ -184,14 +161,10 @@ func (ls *listDirs) WalkR() {
 	}
 }
 
-func newDir(name string) Directory {
-	return NewDir(name, time.Time{})
-}
-
 func testWalkEmpty(t *testing.T) *listDirs {
 	return newListDirs(t, nil, false,
 		listResults{
-			"": {entries: DirEntries{}, err: nil},
+			"": {entries: fs.DirEntries{}, err: nil},
 		},
 		errorMap{
 			"": nil,
@@ -205,7 +178,7 @@ func TestWalkREmpty(t *testing.T) { testWalkEmpty(t).WalkR() }
 func testWalkEmptySkip(t *testing.T) *listDirs {
 	return newListDirs(t, nil, true,
 		listResults{
-			"": {entries: DirEntries{}, err: nil},
+			"": {entries: fs.DirEntries{}, err: nil},
 		},
 		errorMap{
 			"": ErrorSkipDir,
@@ -219,12 +192,12 @@ func TestWalkREmptySkip(t *testing.T) { testWalkEmptySkip(t).WalkR() }
 func testWalkNotFound(t *testing.T) *listDirs {
 	return newListDirs(t, nil, true,
 		listResults{
-			"": {err: ErrorDirNotFound},
+			"": {err: fs.ErrorDirNotFound},
 		},
 		errorMap{
-			"": ErrorDirNotFound,
+			"": fs.ErrorDirNotFound,
 		},
-		ErrorDirNotFound,
+		fs.ErrorDirNotFound,
 	)
 }
 func TestWalkNotFound(t *testing.T)  { testWalkNotFound(t).Walk() }
@@ -234,7 +207,7 @@ func TestWalkNotFoundMaskError(t *testing.T) {
 	// this doesn't work for WalkR
 	newListDirs(t, nil, true,
 		listResults{
-			"": {err: ErrorDirNotFound},
+			"": {err: fs.ErrorDirNotFound},
 		},
 		errorMap{
 			"": nil,
@@ -247,7 +220,7 @@ func TestWalkNotFoundSkipkError(t *testing.T) {
 	// this doesn't work for WalkR
 	newListDirs(t, nil, true,
 		listResults{
-			"": {err: ErrorDirNotFound},
+			"": {err: fs.ErrorDirNotFound},
 		},
 		errorMap{
 			"": ErrorSkipDir,
@@ -257,21 +230,21 @@ func TestWalkNotFoundSkipkError(t *testing.T) {
 }
 
 func testWalkLevels(t *testing.T, maxLevel int) *listDirs {
-	da := newDir("a")
-	oA := mockObject("A")
-	db := newDir("a/b")
-	oB := mockObject("a/B")
-	dc := newDir("a/b/c")
-	oC := mockObject("a/b/C")
-	dd := newDir("a/b/c/d")
-	oD := mockObject("a/b/c/D")
+	da := mockdir.New("a")
+	oA := mockobject.Object("A")
+	db := mockdir.New("a/b")
+	oB := mockobject.Object("a/B")
+	dc := mockdir.New("a/b/c")
+	oC := mockobject.Object("a/b/C")
+	dd := mockdir.New("a/b/c/d")
+	oD := mockobject.Object("a/b/c/D")
 	return newListDirs(t, nil, false,
 		listResults{
-			"":        {entries: DirEntries{oA, da}, err: nil},
-			"a":       {entries: DirEntries{oB, db}, err: nil},
-			"a/b":     {entries: DirEntries{oC, dc}, err: nil},
-			"a/b/c":   {entries: DirEntries{oD, dd}, err: nil},
-			"a/b/c/d": {entries: DirEntries{}, err: nil},
+			"":        {entries: fs.DirEntries{oA, da}, err: nil},
+			"a":       {entries: fs.DirEntries{oB, db}, err: nil},
+			"a/b":     {entries: fs.DirEntries{oC, dc}, err: nil},
+			"a/b/c":   {entries: fs.DirEntries{oD, dd}, err: nil},
+			"a/b/c/d": {entries: fs.DirEntries{}, err: nil},
 		},
 		errorMap{
 			"":        nil,
@@ -309,11 +282,11 @@ a/b/c/d/
 }
 
 func testWalkLevelsNoRecursive(t *testing.T) *listDirs {
-	da := newDir("a")
-	oA := mockObject("A")
+	da := mockdir.New("a")
+	oA := mockobject.Object("A")
 	return newListDirs(t, nil, false,
 		listResults{
-			"": {entries: DirEntries{oA, da}, err: nil},
+			"": {entries: fs.DirEntries{oA, da}, err: nil},
 		},
 		errorMap{
 			"": nil,
@@ -325,14 +298,14 @@ func TestWalkLevelsNoRecursive(t *testing.T)  { testWalkLevelsNoRecursive(t).Wal
 func TestWalkRLevelsNoRecursive(t *testing.T) { testWalkLevelsNoRecursive(t).WalkR() }
 
 func testWalkLevels2(t *testing.T) *listDirs {
-	da := newDir("a")
-	oA := mockObject("A")
-	db := newDir("a/b")
-	oB := mockObject("a/B")
+	da := mockdir.New("a")
+	oA := mockobject.Object("A")
+	db := mockdir.New("a/b")
+	oB := mockobject.Object("a/B")
 	return newListDirs(t, nil, false,
 		listResults{
-			"":  {entries: DirEntries{oA, da}, err: nil},
-			"a": {entries: DirEntries{oB, db}, err: nil},
+			"":  {entries: fs.DirEntries{oA, da}, err: nil},
+			"a": {entries: fs.DirEntries{oB, db}, err: nil},
 		},
 		errorMap{
 			"":  nil,
@@ -345,14 +318,14 @@ func TestWalkLevels2(t *testing.T)  { testWalkLevels2(t).Walk() }
 func TestWalkRLevels2(t *testing.T) { testWalkLevels2(t).WalkR() }
 
 func testWalkSkip(t *testing.T) *listDirs {
-	da := newDir("a")
-	db := newDir("a/b")
-	dc := newDir("a/b/c")
+	da := mockdir.New("a")
+	db := mockdir.New("a/b")
+	dc := mockdir.New("a/b/c")
 	return newListDirs(t, nil, false,
 		listResults{
-			"":    {entries: DirEntries{da}, err: nil},
-			"a":   {entries: DirEntries{db}, err: nil},
-			"a/b": {entries: DirEntries{dc}, err: nil},
+			"":    {entries: fs.DirEntries{da}, err: nil},
+			"a":   {entries: fs.DirEntries{db}, err: nil},
+			"a/b": {entries: fs.DirEntries{dc}, err: nil},
 		},
 		errorMap{
 			"":    nil,
@@ -368,19 +341,19 @@ func TestWalkRSkip(t *testing.T) { testWalkSkip(t).WalkR() }
 func testWalkErrors(t *testing.T) *listDirs {
 	lr := listResults{}
 	em := errorMap{}
-	de := make(DirEntries, 10)
+	de := make(fs.DirEntries, 10)
 	for i := range de {
 		path := string('0' + i)
-		de[i] = newDir(path)
-		lr[path] = listResult{entries: nil, err: ErrorDirNotFound}
-		em[path] = ErrorDirNotFound
+		de[i] = mockdir.New(path)
+		lr[path] = listResult{entries: nil, err: fs.ErrorDirNotFound}
+		em[path] = fs.ErrorDirNotFound
 	}
 	lr[""] = listResult{entries: de, err: nil}
 	em[""] = nil
 	return newListDirs(t, nil, true,
 		lr,
 		em,
-		ErrorDirNotFound,
+		fs.ErrorDirNotFound,
 	).NoCheckMaps()
 }
 func TestWalkErrors(t *testing.T)  { testWalkErrors(t).Walk() }
@@ -393,14 +366,14 @@ func makeTree(level int, terminalErrors bool) (listResults, errorMap) {
 	em := errorMap{}
 	var fill func(path string, level int)
 	fill = func(path string, level int) {
-		de := DirEntries{}
+		de := fs.DirEntries{}
 		if level > 0 {
 			for _, a := range "0123456789" {
 				subPath := string(a)
 				if path != "" {
 					subPath = path + "/" + subPath
 				}
-				de = append(de, newDir(subPath))
+				de = append(de, mockdir.New(subPath))
 				fill(subPath, level-1)
 			}
 		}
@@ -437,8 +410,8 @@ func TestWalkMultiErrors(t *testing.T)  { testWalkMultiErrors(t).Walk() }
 func TestWalkRMultiErrors(t *testing.T) { testWalkMultiErrors(t).WalkR() }
 
 // a very simple ListR callback function
-func makeListRCallback(entries DirEntries, err error) ListRFn {
-	return func(dir string, callback ListRCallback) error {
+func makeListRCallback(entries fs.DirEntries, err error) fs.ListRFn {
+	return func(dir string, callback fs.ListRCallback) error {
 		if err == nil {
 			err = callback(entries)
 		}
@@ -448,22 +421,22 @@ func makeListRCallback(entries DirEntries, err error) ListRFn {
 
 func TestWalkRDirTree(t *testing.T) {
 	for _, test := range []struct {
-		entries DirEntries
+		entries fs.DirEntries
 		want    string
 		err     error
 		root    string
 		level   int
 	}{
-		{DirEntries{}, "/\n", nil, "", -1},
-		{DirEntries{mockObject("a")}, `/
+		{fs.DirEntries{}, "/\n", nil, "", -1},
+		{fs.DirEntries{mockobject.Object("a")}, `/
   a
 `, nil, "", -1},
-		{DirEntries{mockObject("a/b")}, `/
+		{fs.DirEntries{mockobject.Object("a/b")}, `/
   a/
 a/
   b
 `, nil, "", -1},
-		{DirEntries{mockObject("a/b/c/d")}, `/
+		{fs.DirEntries{mockobject.Object("a/b/c/d")}, `/
   a/
 a/
   b/
@@ -472,17 +445,17 @@ a/b/
 a/b/c/
   d
 `, nil, "", -1},
-		{DirEntries{mockObject("a")}, "", errorBoom, "", -1},
-		{DirEntries{
-			mockObject("0/1/2/3"),
-			mockObject("4/5/6/7"),
-			mockObject("8/9/a/b"),
-			mockObject("c/d/e/f"),
-			mockObject("g/h/i/j"),
-			mockObject("k/l/m/n"),
-			mockObject("o/p/q/r"),
-			mockObject("s/t/u/v"),
-			mockObject("w/x/y/z"),
+		{fs.DirEntries{mockobject.Object("a")}, "", errorBoom, "", -1},
+		{fs.DirEntries{
+			mockobject.Object("0/1/2/3"),
+			mockobject.Object("4/5/6/7"),
+			mockobject.Object("8/9/a/b"),
+			mockobject.Object("c/d/e/f"),
+			mockobject.Object("g/h/i/j"),
+			mockobject.Object("k/l/m/n"),
+			mockobject.Object("o/p/q/r"),
+			mockobject.Object("s/t/u/v"),
+			mockobject.Object("w/x/y/z"),
 		}, `/
   0/
   4/
@@ -548,10 +521,10 @@ w/x/
 w/x/y/
   z
 `, nil, "", -1},
-		{DirEntries{
-			mockObject("a/b/c/d/e/f1"),
-			mockObject("a/b/c/d/e/f2"),
-			mockObject("a/b/c/d/e/f3"),
+		{fs.DirEntries{
+			mockobject.Object("a/b/c/d/e/f1"),
+			mockobject.Object("a/b/c/d/e/f2"),
+			mockobject.Object("a/b/c/d/e/f3"),
 		}, `a/b/c/
   d/
 a/b/c/d/
@@ -561,12 +534,12 @@ a/b/c/d/e/
   f2
   f3
 `, nil, "a/b/c", -1},
-		{DirEntries{
-			mockObject("A"),
-			mockObject("a/B"),
-			mockObject("a/b/C"),
-			mockObject("a/b/c/D"),
-			mockObject("a/b/c/d/E"),
+		{fs.DirEntries{
+			mockobject.Object("A"),
+			mockobject.Object("a/B"),
+			mockobject.Object("a/b/C"),
+			mockobject.Object("a/b/c/D"),
+			mockobject.Object("a/b/c/d/E"),
 		}, `/
   A
   a/
@@ -574,9 +547,9 @@ a/
   B
   b/
 `, nil, "", 2},
-		{DirEntries{
-			mockObject("a/b/c"),
-			mockObject("a/b/c/d/e"),
+		{fs.DirEntries{
+			mockobject.Object("a/b/c"),
+			mockobject.Object("a/b/c/d/e"),
 		}, `/
   a/
 a/
@@ -591,7 +564,7 @@ a/
 
 func TestWalkRDirTreeExclude(t *testing.T) {
 	for _, test := range []struct {
-		entries     DirEntries
+		entries     fs.DirEntries
 		want        string
 		err         error
 		root        string
@@ -599,21 +572,21 @@ func TestWalkRDirTreeExclude(t *testing.T) {
 		excludeFile string
 		includeAll  bool
 	}{
-		{DirEntries{mockObject("a"), mockObject("ignore")}, "", nil, "", -1, "ignore", false},
-		{DirEntries{mockObject("a")}, `/
+		{fs.DirEntries{mockobject.Object("a"), mockobject.Object("ignore")}, "", nil, "", -1, "ignore", false},
+		{fs.DirEntries{mockobject.Object("a")}, `/
   a
 `, nil, "", -1, "ignore", false},
-		{DirEntries{
-			mockObject("a"),
-			mockObject("b/b"),
-			mockObject("b/.ignore"),
+		{fs.DirEntries{
+			mockobject.Object("a"),
+			mockobject.Object("b/b"),
+			mockobject.Object("b/.ignore"),
 		}, `/
   a
 `, nil, "", -1, ".ignore", false},
-		{DirEntries{
-			mockObject("a"),
-			mockObject("b/.ignore"),
-			mockObject("b/b"),
+		{fs.DirEntries{
+			mockobject.Object("a"),
+			mockobject.Object("b/.ignore"),
+			mockobject.Object("b/b"),
 		}, `/
   a
   b/
@@ -621,24 +594,24 @@ b/
   .ignore
   b
 `, nil, "", -1, ".ignore", true},
-		{DirEntries{
-			mockObject("a"),
-			mockObject("b/b"),
-			mockObject("b/c/d/e"),
-			mockObject("b/c/ign"),
-			mockObject("b/c/x"),
+		{fs.DirEntries{
+			mockobject.Object("a"),
+			mockobject.Object("b/b"),
+			mockobject.Object("b/c/d/e"),
+			mockobject.Object("b/c/ign"),
+			mockobject.Object("b/c/x"),
 		}, `/
   a
   b/
 b/
   b
 `, nil, "", -1, "ign", false},
-		{DirEntries{
-			mockObject("a"),
-			mockObject("b/b"),
-			mockObject("b/c/d/e"),
-			mockObject("b/c/ign"),
-			mockObject("b/c/x"),
+		{fs.DirEntries{
+			mockobject.Object("a"),
+			mockobject.Object("b/b"),
+			mockobject.Object("b/c/d/e"),
+			mockobject.Object("b/c/ign"),
+			mockobject.Object("b/c/x"),
 		}, `/
   a
   b/
@@ -653,11 +626,11 @@ b/c/d/
   e
 `, nil, "", -1, "ign", true},
 	} {
-		Config.Filter.ExcludeFile = test.excludeFile
+		filter.Active.Opt.ExcludeFile = test.excludeFile
 		r, err := walkRDirTree(nil, test.root, test.includeAll, test.level, makeListRCallback(test.entries, test.err))
 		assert.Equal(t, test.err, err, fmt.Sprintf("%+v", test))
 		assert.Equal(t, test.want, r.String(), fmt.Sprintf("%+v", test))
 	}
 	// Set to default value, to avoid side effects
-	Config.Filter.ExcludeFile = ""
+	filter.Active.Opt.ExcludeFile = ""
 }
diff --git a/fstest/fstest.go b/fstest/fstest.go
index 673495499..0e7c47e30 100644
--- a/fstest/fstest.go
+++ b/fstest/fstest.go
@@ -22,6 +22,10 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/text/unicode/norm"
@@ -54,8 +58,8 @@ func Initialise() {
 	// Never ask for passwords, fail instead.
 	// If your local config is encrypted set environment variable
 	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
-	*fs.AskPassword = false
-	fs.LoadConfig()
+	fs.Config.AskPassword = false
+	config.LoadConfig()
 	if *Verbose {
 		fs.Config.LogLevel = fs.LogLevelDebug
 	}
@@ -72,7 +76,7 @@ func Initialise() {
 // Item represents an item for checking
 type Item struct {
 	Path    string
-	Hashes  map[fs.HashType]string
+	Hashes  map[hash.Type]string
 	ModTime time.Time
 	Size    int64
 	WinPath string
@@ -85,7 +89,7 @@ func NewItem(Path, Content string, modTime time.Time) Item {
 		ModTime: modTime,
 		Size:    int64(len(Content)),
 	}
-	hash := fs.NewMultiHasher()
+	hash := hash.NewMultiHasher()
 	buf := bytes.NewBufferString(Content)
 	_, err := io.Copy(hash, buf)
 	if err != nil {
@@ -115,11 +119,11 @@ func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, prec
 func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
 	require.NotNil(t, obj)
 	types := obj.Fs().Hashes().Array()
-	for _, hash := range types {
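+	// NB the loop variable is Hash rather than hash so the hash package isn't shadowed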
+	for _, Hash := range types {
 		// Check attributes
-		sum, err := obj.Hash(hash)
+		sum, err := obj.Hash(Hash)
 		require.NoError(t, err)
-		assert.True(t, fs.HashEquals(i.Hashes[hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), hash, i.Hashes[hash], sum))
+		assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
 	}
 }
 
@@ -252,7 +256,7 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
 		expectedDirs = filterEmptyDirs(t, items, expectedDirs)
 	}
 	is := NewItems(items)
-	oldErrors := fs.Stats.GetErrors()
+	oldErrors := accounting.Stats.GetErrors()
 	var objs []fs.Object
 	var dirs []fs.Directory
 	var err error
@@ -262,7 +266,7 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
 	gotListing := "<unset>"
 	listingOK := false
 	for i := 1; i <= retries; i++ {
-		objs, dirs, err = fs.WalkGetAll(f, "", true, -1)
+		objs, dirs, err = walk.GetAll(f, "", true, -1)
 		if err != nil && err != fs.ErrorDirNotFound {
 			t.Fatalf("Error listing: %v", err)
 		}
@@ -294,8 +298,8 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs
 	}
 	is.Done(t)
 	// Don't notice an error when listing an empty directory
-	if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
-		fs.Stats.ResetErrors()
+	if len(items) == 0 && oldErrors == 0 && accounting.Stats.GetErrors() == 1 {
+		accounting.Stats.ResetErrors()
 	}
 	// Check the directories
 	if expectedDirs != nil {
@@ -418,9 +422,9 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
 	}
 
 	finalise := func() {
-		_ = fs.Purge(remote) // ignore error
+		Purge(remote)
 		if parentRemote != nil {
-			err = fs.Purge(parentRemote) // ignore error
-			if err != nil {
-				log.Printf("Failed to purge %v: %v", parentRemote, err)
-			}
+			Purge(parentRemote) // Purge logs any errors itself
@@ -430,22 +434,48 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
 	return remote, remoteName, finalise, nil
 }
 
-// TestMkdir tests Mkdir works
-func TestMkdir(t *testing.T, remote fs.Fs) {
-	err := fs.Mkdir(remote, "")
-	require.NoError(t, err)
-	CheckListing(t, remote, []Item{})
-}
-
-// TestPurge tests Purge works
-func TestPurge(t *testing.T, remote fs.Fs) {
-	err := fs.Purge(remote)
-	require.NoError(t, err)
-	CheckListing(t, remote, []Item{})
-}
-
-// TestRmdir tests Rmdir works
-func TestRmdir(t *testing.T, remote fs.Fs) {
-	err := fs.Rmdir(remote, "")
-	require.NoError(t, err)
+// Purge is a simplified re-implementation of operations.Purge for the
+// test routine cleanup to avoid circular dependencies.
+//
+// It logs errors rather than returning them
+func Purge(f fs.Fs) {
+	var err error
+	doFallbackPurge := true
+	if doPurge := f.Features().Purge; doPurge != nil {
+		doFallbackPurge = false
+		err = doPurge()
+		if err == fs.ErrorCantPurge {
+			doFallbackPurge = true
+		}
+	}
+	if doFallbackPurge {
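+		// no usable Purge feature, so remove the objects and then the directories by hand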
+		var dirs []string
+		err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
+			if err != nil {
+				log.Printf("purge walk returned error: %v", err)
+				return nil
+			}
+			entries.ForObject(func(obj fs.Object) {
+				err = obj.Remove()
+				if err != nil {
+					log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
+				}
+			})
+			entries.ForDir(func(dir fs.Directory) {
+				dirs = append(dirs, dir.Remote())
+			})
+			return nil
+		})
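+		// remove the directories in reverse sorted order so children get deleted before their parents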
+		sort.Strings(dirs)
+		for i := len(dirs) - 1; i >= 0; i-- {
+			dir := dirs[i]
+			err := f.Rmdir(dir)
+			if err != nil {
+				log.Printf("purge failed to rmdir %q: %v", dir, err)
+			}
+		}
+	}
+	if err != nil {
+		log.Printf("purge failed: %v", err)
+	}
 }
diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go
index ffce3ae31..b1bfc963f 100644
--- a/fstest/fstests/fstests.go
+++ b/fstest/fstests/fstests.go
@@ -19,6 +19,12 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/operations"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/fstest"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
@@ -84,7 +90,7 @@ func TestInit(t *testing.T) {
 
 	// Set extra config if supplied
 	for _, item := range ExtraConfig {
-		fs.ConfigFileSet(item.Name, item.Key, item.Value)
+		config.FileSet(item.Name, item.Key, item.Value)
 	}
 	if *fstest.RemoteName != "" {
 		RemoteName = *fstest.RemoteName
@@ -99,7 +105,10 @@ func TestInit(t *testing.T) {
 	newFs(t)
 
 	skipIfNotOk(t)
-	fstest.TestMkdir(t, remote)
+
+	err = remote.Mkdir("")
+	require.NoError(t, err)
+	fstest.CheckListing(t, remote, []fstest.Item{})
 }
 
 func skipIfNotOk(t *testing.T) {
@@ -156,7 +165,8 @@ func TestFsRoot(t *testing.T) {
 // TestFsRmdirEmpty tests deleting an empty directory
 func TestFsRmdirEmpty(t *testing.T) {
 	skipIfNotOk(t)
-	fstest.TestRmdir(t, remote)
+	err := remote.Rmdir("")
+	require.NoError(t, err)
 }
 
 // TestFsRmdirNotFound tests deleting a non existent directory
@@ -175,23 +185,27 @@ func TestFsMkdir(t *testing.T) {
 	// (eg azure blob)
 	newFs(t)
 
-	fstest.TestMkdir(t, remote)
-	fstest.TestMkdir(t, remote)
+	err := remote.Mkdir("")
+	require.NoError(t, err)
+	fstest.CheckListing(t, remote, []fstest.Item{})
+
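+	// Mkdir on an existing directory should not return an error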
+	err = remote.Mkdir("")
+	require.NoError(t, err)
 }
 
 // TestFsMkdirRmdirSubdir tests making and removing a sub directory
 func TestFsMkdirRmdirSubdir(t *testing.T) {
 	skipIfNotOk(t)
 	dir := "dir/subdir"
-	err := fs.Mkdir(remote, dir)
+	err := operations.Mkdir(remote, dir)
 	require.NoError(t, err)
 	fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.Config.ModifyWindow)
 
-	err = fs.Rmdir(remote, dir)
+	err = operations.Rmdir(remote, dir)
 	require.NoError(t, err)
 	fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.Config.ModifyWindow)
 
-	err = fs.Rmdir(remote, "dir")
+	err = operations.Rmdir(remote, "dir")
 	require.NoError(t, err)
 	fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.Config.ModifyWindow)
 }
@@ -236,7 +250,7 @@ func objsToNames(objs []fs.Object) []string {
 // TestFsListDirEmpty tests listing the directories from an empty directory
 func TestFsListDirEmpty(t *testing.T) {
 	skipIfNotOk(t)
-	objs, dirs, err := fs.WalkGetAll(remote, "", true, 1)
+	objs, dirs, err := walk.GetAll(remote, "", true, 1)
 	require.NoError(t, err)
 	assert.Equal(t, []string{}, objsToNames(objs))
 	assert.Equal(t, []string{}, dirsToNames(dirs))
@@ -282,15 +296,15 @@ func testPut(t *testing.T, file *fstest.Item) string {
 again:
 	contents := fstest.RandomString(100)
 	buf := bytes.NewBufferString(contents)
-	hash := fs.NewMultiHasher()
+	hash := hash.NewMultiHasher()
 	in := io.TeeReader(buf, hash)
 
 	file.Size = int64(buf.Len())
-	obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
 	obj, err := remote.Put(in, obji)
 	if err != nil {
 		// Retry if err returned a retry error
-		if fs.IsRetryError(err) && tries < maxTries {
+		if fserrors.IsRetryError(err) && tries < maxTries {
 			t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
 			time.Sleep(2 * time.Second)
 
@@ -334,7 +348,7 @@ func TestFsPutError(t *testing.T) {
 	er := &errorReader{errors.New("potato")}
 	in := io.MultiReader(buf, er)
 
-	obji := fs.NewStaticObjectInfo(file2.Path, file2.ModTime, 100, true, nil, nil)
+	obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 100, true, nil, nil)
 	_, err := remote.Put(in, obji)
 	// assert.Nil(t, obj) - FIXME some remotes return the object even on error
 	assert.NotNil(t, err)
@@ -364,9 +378,9 @@ func TestFsListDirFile2(t *testing.T) {
 	list := func(dir string, expectedDirNames, expectedObjNames []string) {
 		var objNames, dirNames []string
 		for i := 1; i <= *fstest.ListRetries; i++ {
-			objs, dirs, err := fs.WalkGetAll(remote, dir, true, 1)
+			objs, dirs, err := walk.GetAll(remote, dir, true, 1)
 			if errors.Cause(err) == fs.ErrorDirNotFound {
-				objs, dirs, err = fs.WalkGetAll(remote, winPath(dir), true, 1)
+				objs, dirs, err = walk.GetAll(remote, winPath(dir), true, 1)
 			}
 			require.NoError(t, err)
 			objNames = objsToNames(objs)
@@ -413,7 +427,7 @@ func TestFsListDirRoot(t *testing.T) {
 	skipIfNotOk(t)
 	rootRemote, err := fs.NewFs(RemoteName)
 	require.NoError(t, err)
-	_, dirs, err := fs.WalkGetAll(rootRemote, "", true, 1)
+	_, dirs, err := walk.GetAll(rootRemote, "", true, 1)
 	require.NoError(t, err)
 	assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
 }
@@ -434,7 +448,7 @@ func TestFsListSubdir(t *testing.T) {
 	for i := 0; i < 2; i++ {
 		dir, _ := path.Split(fileName)
 		dir = dir[:len(dir)-1]
-		objs, dirs, err = fs.WalkGetAll(remote, dir, true, -1)
+		objs, dirs, err = walk.GetAll(remote, dir, true, -1)
 		if err != fs.ErrorDirNotFound {
 			break
 		}
@@ -455,7 +469,7 @@ func TestFsListRSubdir(t *testing.T) {
 // TestFsListLevel2 tests List works for 2 levels
 func TestFsListLevel2(t *testing.T) {
 	skipIfNotOk(t)
-	objs, dirs, err := fs.WalkGetAll(remote, "", true, 2)
+	objs, dirs, err := walk.GetAll(remote, "", true, 2)
 	if err == fs.ErrorLevelNotSupported {
 		return
 	}
@@ -676,7 +690,7 @@ func TestFsDirChangeNotify(t *testing.T) {
 		t.Skip("FS has no DirChangeNotify interface")
 	}
 
-	err := fs.Mkdir(remote, "dir")
+	err := operations.Mkdir(remote, "dir")
 	require.NoError(t, err)
 
 	changes := []string{}
@@ -685,7 +699,7 @@ func TestFsDirChangeNotify(t *testing.T) {
 	}, time.Second)
 	defer func() { close(quitChannel) }()
 
-	err = fs.Mkdir(remote, "dir/subdir")
+	err = operations.Mkdir(remote, "dir/subdir")
 	require.NoError(t, err)
 
 	time.Sleep(2 * time.Second)
@@ -817,12 +831,12 @@ func TestObjectUpdate(t *testing.T) {
 	skipIfNotOk(t)
 	contents := fstest.RandomString(200)
 	buf := bytes.NewBufferString(contents)
-	hash := fs.NewMultiHasher()
+	hash := hash.NewMultiHasher()
 	in := io.TeeReader(buf, hash)
 
 	file1.Size = int64(buf.Len())
 	obj := findObject(t, file1.Path)
-	obji := fs.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
+	obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
 	err := obj.Update(in, obji)
 	require.NoError(t, err)
 	file1.Hashes = hash.Sums()
@@ -896,15 +910,15 @@ again:
 	contentSize := 100
 	contents := fstest.RandomString(contentSize)
 	buf := bytes.NewBufferString(contents)
-	hash := fs.NewMultiHasher()
+	hash := hash.NewMultiHasher()
 	in := io.TeeReader(buf, hash)
 
 	file.Size = -1
-	obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
 	obj, err := remote.Features().PutStream(in, obji)
 	if err != nil {
 		// Retry if err returned a retry error
-		if fs.IsRetryError(err) && tries < maxTries {
+		if fserrors.IsRetryError(err) && tries < maxTries {
 			t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
 			time.Sleep(2 * time.Second)
 
@@ -924,8 +938,12 @@ again:
 // TestObjectPurge tests Purge
 func TestObjectPurge(t *testing.T) {
 	skipIfNotOk(t)
-	fstest.TestPurge(t, remote)
-	err := fs.Purge(remote)
+
+	err := operations.Purge(remote)
+	require.NoError(t, err)
+	fstest.CheckListing(t, remote, []fstest.Item{})
+
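+	// a second Purge should fail as the remote is now gone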
+	err = operations.Purge(remote)
 	assert.Error(t, err, "Expecting error on second purge")
 }
 
diff --git a/fstest/mockdir/dir.go b/fstest/mockdir/dir.go
new file mode 100644
index 000000000..71edf665c
--- /dev/null
+++ b/fstest/mockdir/dir.go
@@ -0,0 +1,13 @@
+// Package mockdir makes a mock fs.Directory object
+package mockdir
+
+import (
+	"time"
+
+	"github.com/ncw/rclone/fs"
+)
+
+// New makes a mock directory object with the name given
+func New(name string) fs.Directory {
+	return fs.NewDir(name, time.Time{})
+}
diff --git a/fstest/mockobject/mockobject.go b/fstest/mockobject/mockobject.go
new file mode 100644
index 000000000..9c83987a3
--- /dev/null
+++ b/fstest/mockobject/mockobject.go
@@ -0,0 +1,71 @@
+// Package mockobject provides a mock object which can be created from a string
+package mockobject
+
+import (
+	"errors"
+	"io"
+	"time"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+)
+
+var errNotImpl = errors.New("not implemented")
+
+// Object is a mock fs.Object useful for testing
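+//
+// The string it is created from is used as both its description and its remote path.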
+type Object string
+
+// String returns a description of the Object
+func (o Object) String() string {
+	return string(o)
+}
+
+// Fs returns read only access to the Fs that this object is part of
+func (o Object) Fs() fs.Info {
+	return nil
+}
+
+// Remote returns the remote path
+func (o Object) Remote() string {
+	return string(o)
+}
+
+// Hash returns the selected checksum of the file
+// If no checksum is available it returns ""
+func (o Object) Hash(hash.Type) (string, error) {
+	return "", errNotImpl
+}
+
+// ModTime returns the modification date of the file
+// It should return a best guess if one isn't available
+func (o Object) ModTime() (t time.Time) {
+	return t
+}
+
+// Size returns the size of the file
+func (o Object) Size() int64 { return 0 }
+
+// Storable says whether this object can be stored
+func (o Object) Storable() bool {
+	return true
+}
+
+// SetModTime sets the metadata on the object to set the modification date
+func (o Object) SetModTime(time.Time) error {
+	return errNotImpl
+}
+
+// Open opens the file for read.  Call Close() on the returned io.ReadCloser
+func (o Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+	return nil, errNotImpl
+}
+
+// Update in to the object with the modTime given of the given size
+func (o Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	return errNotImpl
+}
+
+// Remove this object
+func (o Object) Remove() error {
+	return errNotImpl
+}
diff --git a/fstest/run.go b/fstest/run.go
index cb54d1781..8a4008e24 100644
--- a/fstest/run.go
+++ b/fstest/run.go
@@ -37,7 +37,9 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/stretchr/testify/assert"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/stretchr/testify/require"
 )
 
@@ -127,7 +129,7 @@ func NewRun(t *testing.T) *Run {
 		*r = *oneRun
 		r.cleanRemote = func() {
 			var toDelete dirsToRemove
-			err := fs.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
+			err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
 				if err != nil {
 					if err == fs.ErrorDirNotFound {
 						return nil
@@ -231,13 +233,13 @@ func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time,
 	const maxTries = 10
 	for tries := 1; ; tries++ {
 		in := bytes.NewBufferString(content)
-		objinfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
+		objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil)
 		_, err := put(in, objinfo)
 		if err == nil {
 			break
 		}
 		// Retry if err returned a retry error
-		if fs.IsRetryError(err) && tries < maxTries {
+		if fserrors.IsRetryError(err) && tries < maxTries {
 			r.Logf("Retry Put of %q to %v: %d/%d (%v)", remote, f, tries, maxTries, err)
 			time.Sleep(2 * time.Second)
 			continue
@@ -265,14 +267,15 @@ func (r *Run) WriteBoth(remote, content string, modTime time.Time) Item {
 
 // CheckWithDuplicates does a test but allows duplicates
 func (r *Run) CheckWithDuplicates(t *testing.T, items ...Item) {
-	objects, size, err := fs.Count(r.Fremote)
-	require.NoError(t, err)
-	assert.Equal(t, int64(len(items)), objects)
-	wantSize := int64(0)
-	for _, item := range items {
-		wantSize += item.Size
-	}
-	assert.Equal(t, wantSize, size)
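+	// FIXME reinstate this check when it can be done without
+	// importing operations (which would create an import loop)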
+	panic("FIXME")
+	// objects, size, err := operations.Count(r.Fremote)
+	// require.NoError(t, err)
+	// assert.Equal(t, int64(len(items)), objects)
+	// wantSize := int64(0)
+	// for _, item := range items {
+	// 	wantSize += item.Size
+	// }
+	// assert.Equal(t, wantSize, size)
 }
 
 // Clean the temporary directory
diff --git a/fs/test_all.go b/fstest/test_all/test_all.go
similarity index 79%
rename from fs/test_all.go
rename to fstest/test_all/test_all.go
index eff83b6a3..47c088b0d 100644
--- a/fs/test_all.go
+++ b/fstest/test_all/test_all.go
@@ -1,22 +1,26 @@
-// +build ignore
-
-// Run tests for all the remotes
+// Run tests for all the remotes.  Run this with package names which
+// need integration testing.
 //
-// Run with go run test_all.go
+// See the `test` target in the Makefile.
 package main
 
 import (
 	"flag"
+	"go/build"
 	"log"
 	"os"
 	"os/exec"
+	"path"
 	"regexp"
 	"runtime"
 	"strings"
 	"time"
 
+	_ "github.com/ncw/rclone/backend/all" // import all fs
 	"github.com/ncw/rclone/fs"
-	_ "github.com/ncw/rclone/fs/all" // import all fs
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/list"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/ncw/rclone/fstest"
 )
 
@@ -129,17 +133,17 @@ var (
 			FastList: false,
 		},
 	}
-	binary = "fs.test"
 	// Flags
 	maxTries = flag.Int("maxtries", 5, "Number of times to try each test")
 	runTests = flag.String("remotes", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
 	clean    = flag.Bool("clean", false, "Instead of testing, clean all left over test directories")
-	runOnly  = flag.String("run-only", "", "Run only those tests matching the regexp supplied")
+	runOnly  = flag.String("run", "", "Run only those tests matching the regexp supplied")
 	timeout  = flag.Duration("timeout", 30*time.Minute, "Maximum time to run each test for before giving up")
 )
 
 // test holds info about a running test
 type test struct {
+	pkg         string
 	remote      string
 	subdir      bool
 	cmdLine     []string
@@ -152,11 +156,13 @@ type test struct {
 }
 
 // newTest creates a new test
-func newTest(remote string, subdir bool, fastlist bool) *test {
+func newTest(pkg, remote string, subdir bool, fastlist bool) *test {
+	binary := pkgBinary(pkg)
 	t := &test{
+		pkg:     pkg,
 		remote:  remote,
 		subdir:  subdir,
-		cmdLine: []string{"./" + binary, "-test.timeout", (*timeout).String(), "-remote", remote},
+		cmdLine: []string{binary, "-test.timeout", (*timeout).String(), "-remote", remote},
 		try:     1,
 	}
 	if *fstest.Verbose {
@@ -258,7 +264,7 @@ func (t *test) cleanFs() error {
 	if err != nil {
 		return err
 	}
-	entries, err := fs.ListDirSorted(f, true, "")
+	entries, err := list.DirSorted(f, true, "")
 	if err != nil {
 		return err
 	}
@@ -270,7 +276,7 @@ func (t *test) cleanFs() error {
 			if err != nil {
 				return err
 			}
-			return fs.Purge(dir)
+			return operations.Purge(dir)
 		}
 		return nil
 	})
@@ -317,23 +323,60 @@ func (t *test) run(result chan<- *test) {
 	result <- t
 }
 
-// makeTestBinary makes the binary we will run
-func makeTestBinary() {
+// GOPATH returns the current GOPATH
+func GOPATH() string {
+	gopath := os.Getenv("GOPATH")
+	if gopath == "" {
+		gopath = build.Default.GOPATH
+	}
+	return gopath
+}
+
+// turn a package name into a binary name
+func pkgBinaryName(pkg string) string {
+	binary := path.Base(pkg) + ".test"
 	if runtime.GOOS == "windows" {
 		binary += ".exe"
 	}
-	log.Printf("Making test binary %q", binary)
-	err := exec.Command("go", "test", "-c", "-o", binary).Run()
+	return binary
+}
+
+// turn a package name into a binary path
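+// eg github.com/ncw/rclone/fstest -> $GOPATH/src/github.com/ncw/rclone/fstest/fstest.test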
+func pkgBinary(pkg string) string {
+	return path.Join(pkgPath(pkg), pkgBinaryName(pkg))
+}
+
+// returns the path to the package
+func pkgPath(pkg string) string {
+	return path.Join(GOPATH(), "src", pkg)
+}
+
+// cd into the package directory
+func pkgChdir(pkg string) {
+	err := os.Chdir(pkgPath(pkg))
+	if err != nil {
+		log.Fatalf("Failed to chdir to package %q: %v", pkg, err)
+	}
+}
+
+// makeTestBinary makes the binary we will run
+func makeTestBinary(pkg string) {
+	binaryName := pkgBinaryName(pkg)
+	log.Printf("%s: Making test binary %q", pkg, binaryName)
+	pkgChdir(pkg)
+	err := exec.Command("go", "test", "-c", "-o", binaryName).Run()
 	if err != nil {
 		log.Fatalf("Failed to make test binary: %v", err)
 	}
+	binary := pkgBinary(pkg)
 	if _, err := os.Stat(binary); err != nil {
 		log.Fatalf("Couldn't find test binary %q", binary)
 	}
 }
 
 // removeTestBinary removes the binary made in makeTestBinary
-func removeTestBinary() {
+func removeTestBinary(pkg string) {
+	binary := pkgBinary(pkg)
 	err := os.Remove(binary) // Delete the binary when finished
 	if err != nil {
 		log.Printf("Error removing test binary %q: %v", binary, err)
@@ -342,6 +385,8 @@ func removeTestBinary() {
 
 func main() {
 	flag.Parse()
+	packages := flag.Args()
+	log.Printf("Testing packages: %s", strings.Join(packages, ", "))
 	if *runTests != "" {
 		newRemotes := []remoteConfig{}
 		for _, name := range strings.Split(*runTests, ",") {
@@ -365,10 +410,12 @@ func main() {
 
 	start := time.Now()
 	if *clean {
-		fs.LoadConfig()
+		config.LoadConfig()
 	} else {
-		makeTestBinary()
-		defer removeTestBinary()
+		for _, pkg := range packages {
+			makeTestBinary(pkg)
+			defer removeTestBinary(pkg)
+		}
 	}
 
 	// start the tests
@@ -379,12 +426,14 @@ func main() {
 		// Don't run -subdir and -fast-list if -clean
 		bools = bools[:1]
 	}
-	for _, remote := range remotes {
-		for _, subdir := range bools {
-			for _, fastlist := range bools {
-				if (!subdir || subdir && remote.SubDir) && (!fastlist || fastlist && remote.FastList) {
-					go newTest(remote.Name, subdir, fastlist).run(results)
-					awaiting++
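+	// start a test for each package against each remote and flag combination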
+	for _, pkg := range packages {
+		for _, remote := range remotes {
+			for _, subdir := range bools {
+				for _, fastlist := range bools {
+					if (!subdir || subdir && remote.SubDir) && (!fastlist || fastlist && remote.FastList) {
+						go newTest(pkg, remote.Name, subdir, fastlist).run(results)
+						awaiting++
+					}
 				}
 			}
 		}
diff --git a/lib/oauthutil/oauthutil.go b/lib/oauthutil/oauthutil.go
index 529317d7c..8c65209a4 100644
--- a/lib/oauthutil/oauthutil.go
+++ b/lib/oauthutil/oauthutil.go
@@ -12,6 +12,8 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/pkg/errors"
 	"github.com/skratchdot/open-golang/open"
 	"golang.org/x/net/context"
@@ -54,7 +56,7 @@ type oldToken struct {
 // GetToken returns the token saved in the config file under
 // section name.
 func GetToken(name string) (*oauth2.Token, error) {
-	tokenString := fs.ConfigFileGet(name, fs.ConfigToken)
+	tokenString := config.FileGet(name, config.ConfigToken)
 	if tokenString == "" {
 		return nil, errors.New("empty token found - please run rclone config again")
 	}
@@ -94,9 +96,9 @@ func PutToken(name string, token *oauth2.Token, newSection bool) error {
 		return err
 	}
 	tokenString := string(tokenBytes)
-	old := fs.ConfigFileGet(name, fs.ConfigToken)
+	old := config.FileGet(name, config.ConfigToken)
 	if tokenString != old {
-		err = fs.ConfigSetValueAndSave(name, fs.ConfigToken, tokenString)
+		err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
 		if newSection && err != nil {
 			fs.Debugf(name, "Added new token to config, still needs to be saved")
 		} else if err != nil {
@@ -197,31 +199,31 @@ func Context(client *http.Client) context.Context {
 // config file if they are not blank.
 // If any value is overridden, true is returned.
 // the origConfig is copied
-func overrideCredentials(name string, origConfig *oauth2.Config) (config *oauth2.Config, changed bool) {
-	config = new(oauth2.Config)
-	*config = *origConfig
+func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
+	newConfig = new(oauth2.Config)
+	*newConfig = *origConfig
 	changed = false
-	ClientID := fs.ConfigFileGet(name, fs.ConfigClientID)
+	ClientID := config.FileGet(name, config.ConfigClientID)
 	if ClientID != "" {
-		config.ClientID = ClientID
+		newConfig.ClientID = ClientID
 		changed = true
 	}
-	ClientSecret := fs.ConfigFileGet(name, fs.ConfigClientSecret)
+	ClientSecret := config.FileGet(name, config.ConfigClientSecret)
 	if ClientSecret != "" {
-		config.ClientSecret = ClientSecret
+		newConfig.ClientSecret = ClientSecret
 		changed = true
 	}
-	AuthURL := fs.ConfigFileGet(name, fs.ConfigAuthURL)
+	AuthURL := config.FileGet(name, config.ConfigAuthURL)
 	if AuthURL != "" {
-		config.Endpoint.AuthURL = AuthURL
+		newConfig.Endpoint.AuthURL = AuthURL
 		changed = true
 	}
-	TokenURL := fs.ConfigFileGet(name, fs.ConfigTokenURL)
+	TokenURL := config.FileGet(name, config.ConfigTokenURL)
 	if TokenURL != "" {
-		config.Endpoint.TokenURL = TokenURL
+		newConfig.Endpoint.TokenURL = TokenURL
 		changed = true
 	}
-	return config, changed
+	return newConfig, changed
 }
 
 // NewClientWithBaseClient gets a token from the config file and
@@ -252,8 +254,8 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt
 
 // NewClient gets a token from the config file and configures a Client
 // with it.  It returns the client and a TokenSource which Invalidate may need to be called on
-func NewClient(name string, config *oauth2.Config) (*http.Client, *TokenSource, error) {
-	return NewClientWithBaseClient(name, config, fs.Config.Client())
+func NewClient(name string, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
+	return NewClientWithBaseClient(name, oauthConfig, fshttp.NewClient(fs.Config))
 }
 
 // Config does the initial creation of the token
@@ -269,26 +271,26 @@ func ConfigNoOffline(id, name string, config *oauth2.Config, opts ...oauth2.Auth
 	return doConfig(id, name, config, false, opts)
 }
 
-func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
-	config, changed := overrideCredentials(name, config)
-	automatic := fs.ConfigFileGet(name, fs.ConfigAutomatic) != ""
+func doConfig(id, name string, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
+	oauthConfig, changed := overrideCredentials(name, oauthConfig)
+	automatic := config.FileGet(name, config.ConfigAutomatic) != ""
 
 	if changed {
-		fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", config.RedirectURL)
+		fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", RedirectURL)
 	}
 
 	// See if already have a token
-	tokenString := fs.ConfigFileGet(name, "token")
+	tokenString := config.FileGet(name, "token")
 	if tokenString != "" {
 		fmt.Printf("Already have a token - refresh?\n")
-		if !fs.Confirm() {
+		if !config.Confirm() {
 			return nil
 		}
 	}
 
 	// Detect whether we should use internal web server
 	useWebServer := false
-	switch config.RedirectURL {
+	switch oauthConfig.RedirectURL {
 	case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
 		useWebServer = true
 		if automatic {
@@ -297,12 +299,12 @@ func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth
 		fmt.Printf("Use auto config?\n")
 		fmt.Printf(" * Say Y if not sure\n")
 		fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
-		auto := fs.Confirm()
+		auto := config.Confirm()
 		if !auto {
 			fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
 			fmt.Printf("Execute the following on your machine:\n")
 			if changed {
-				fmt.Printf("\trclone authorize %q %q %q\n", id, config.ClientID, config.ClientSecret)
+				fmt.Printf("\trclone authorize %q %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
 			} else {
 				fmt.Printf("\trclone authorize %q\n", id)
 			}
@@ -310,7 +312,7 @@ func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth
 			code := ""
 			for code == "" {
 				fmt.Printf("result> ")
-				code = strings.TrimSpace(fs.ReadLine())
+				code = strings.TrimSpace(config.ReadLine())
 			}
 			token := &oauth2.Token{}
 			err := json.Unmarshal([]byte(code), token)
@@ -325,13 +327,13 @@ func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth
 			fmt.Printf("Use auto config?\n")
 			fmt.Printf(" * Say Y if not sure\n")
 			fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
-			useWebServer = fs.Confirm()
+			useWebServer = config.Confirm()
 		}
 		if useWebServer {
 			// copy the config and set to use the internal webserver
-			configCopy := *config
-			config = &configCopy
-			config.RedirectURL = RedirectURL
+			configCopy := *oauthConfig
+			oauthConfig = &configCopy
+			oauthConfig.RedirectURL = RedirectURL
 		}
 	}
 
@@ -345,7 +347,7 @@ func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth
 	if offline {
 		opts = append(opts, oauth2.AccessTypeOffline)
 	}
-	authURL := config.AuthCodeURL(state, opts...)
+	authURL := oauthConfig.AuthCodeURL(state, opts...)
 
 	// Prepare webserver
 	server := authServer{
@@ -378,9 +380,9 @@ func doConfig(id, name string, config *oauth2.Config, offline bool, opts []oauth
 	} else {
 		// Read the code, and exchange it for a token.
 		fmt.Printf("Enter verification code> ")
-		authCode = fs.ReadLine()
+		authCode = config.ReadLine()
 	}
-	token, err := config.Exchange(oauth2.NoContext, authCode)
+	token, err := oauthConfig.Exchange(oauth2.NoContext, authCode)
 	if err != nil {
 		return errors.Wrap(err, "failed to get token")
 	}
diff --git a/lib/pacer/pacer.go b/lib/pacer/pacer.go
index d287b3ade..fb9ae9977 100644
--- a/lib/pacer/pacer.go
+++ b/lib/pacer/pacer.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
 )
 
 // Pacer state
@@ -339,7 +340,7 @@ func (p *Pacer) call(fn Paced, retries int) (err error) {
 		fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
 	}
 	if retry {
-		err = fs.RetryError(err)
+		err = fserrors.RetryError(err)
 	}
 	return err
 }
diff --git a/lib/pacer/pacer_test.go b/lib/pacer/pacer_test.go
index dfee0352b..7dc91fbf2 100644
--- a/lib/pacer/pacer_test.go
+++ b/lib/pacer/pacer_test.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/pkg/errors"
 )
 
@@ -401,7 +402,7 @@ func Test_callRetry(t *testing.T) {
 	if err == errFoo {
 		t.Errorf("err didn't want %v got %v", errFoo, err)
 	}
-	_, ok := err.(fs.Retrier)
+	_, ok := err.(fserrors.Retrier)
 	if !ok {
 		t.Errorf("didn't return a retry error")
 	}
@@ -415,7 +416,7 @@ func TestCall(t *testing.T) {
 	if dp.called != 20 {
 		t.Errorf("called want %d got %d", 20, dp.called)
 	}
-	_, ok := err.(fs.Retrier)
+	_, ok := err.(fserrors.Retrier)
 	if !ok {
 		t.Errorf("didn't return a retry error")
 	}
@@ -429,7 +430,7 @@ func TestCallNoRetry(t *testing.T) {
 	if dp.called != 1 {
 		t.Errorf("called want %d got %d", 1, dp.called)
 	}
-	_, ok := err.(fs.Retrier)
+	_, ok := err.(fserrors.Retrier)
 	if !ok {
 		t.Errorf("didn't return a retry error")
 	}
diff --git a/fs/counting_reader.go b/lib/readers/counting_reader.go
similarity index 97%
rename from fs/counting_reader.go
rename to lib/readers/counting_reader.go
index 20629dad3..872b4c50e 100644
--- a/fs/counting_reader.go
+++ b/lib/readers/counting_reader.go
@@ -1,4 +1,4 @@
-package fs
+package readers
 
 import "io"
 
diff --git a/lib/readers/readfill.go b/lib/readers/readfill.go
new file mode 100644
index 000000000..64b5de44e
--- /dev/null
+++ b/lib/readers/readfill.go
@@ -0,0 +1,18 @@
+package readers
+
+import "io"
+
+// ReadFill reads as much data from r into buf as it can
+//
+// It reads until the buffer is full or r.Read returned an error.
+//
+// This is io.ReadFull but when you just want as much data as
+// possible, not an exact size of block.
+func ReadFill(r io.Reader, buf []byte) (n int, err error) {
+	var nn int
+	for n < len(buf) && err == nil {
+		nn, err = r.Read(buf[n:])
+		n += nn
+	}
+	return n, err
+}
diff --git a/lib/readers/readfill_test.go b/lib/readers/readfill_test.go
new file mode 100644
index 000000000..264fc721e
--- /dev/null
+++ b/lib/readers/readfill_test.go
@@ -0,0 +1,42 @@
+package readers
+
+import (
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
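+// byteReader reads the single bytes c, c-1, ..., 1 and then returns io.EOF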
+type byteReader struct {
+	c byte
+}
+
+func (br *byteReader) Read(p []byte) (n int, err error) {
+	if br.c == 0 {
+		err = io.EOF
+	} else if len(p) >= 1 {
+		p[0] = br.c
+		n = 1
+		br.c--
+	}
+	return
+}
+
+func TestReadFill(t *testing.T) {
+	buf := []byte{9, 9, 9, 9, 9}
+
+	n, err := ReadFill(&byteReader{0}, buf)
+	assert.Equal(t, io.EOF, err)
+	assert.Equal(t, 0, n)
+	assert.Equal(t, []byte{9, 9, 9, 9, 9}, buf)
+
+	n, err = ReadFill(&byteReader{3}, buf)
+	assert.Equal(t, io.EOF, err)
+	assert.Equal(t, 3, n)
+	assert.Equal(t, []byte{3, 2, 1, 9, 9}, buf)
+
+	n, err = ReadFill(&byteReader{8}, buf)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, 5, n)
+	assert.Equal(t, []byte{8, 7, 6, 5, 4}, buf)
+}
diff --git a/fs/readers.go b/lib/readers/repeatable.go
similarity index 99%
rename from fs/readers.go
rename to lib/readers/repeatable.go
index c7a9271a5..ab0073581 100644
--- a/fs/readers.go
+++ b/lib/readers/repeatable.go
@@ -1,4 +1,4 @@
-package fs
+package readers
 
 import (
 	"io"
diff --git a/fs/readers_test.go b/lib/readers/repeatable_test.go
similarity index 99%
rename from fs/readers_test.go
rename to lib/readers/repeatable_test.go
index 713177f8c..329880b2e 100644
--- a/fs/readers_test.go
+++ b/lib/readers/repeatable_test.go
@@ -1,4 +1,4 @@
-package fs
+package readers
 
 import (
 	"bytes"
diff --git a/vfs/cache.go b/vfs/cache.go
index 838f37f20..55946c6dc 100644
--- a/vfs/cache.go
+++ b/vfs/cache.go
@@ -15,6 +15,7 @@ import (
 
 	"github.com/djherbis/times"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
@@ -94,7 +95,7 @@ func newCache(ctx context.Context, f fs.Fs, opt *Options) (*cache, error) {
 		}
 		fRoot = strings.Replace(fRoot, ":", "", -1)
 	}
-	root := filepath.Join(fs.CacheDir, "vfs", f.Name(), fRoot)
+	root := filepath.Join(config.CacheDir, "vfs", f.Name(), fRoot)
 	fs.Debugf(nil, "vfs cache root is %q", root)
 
 	f, err := fs.NewFs(root)
diff --git a/vfs/dir.go b/vfs/dir.go
index 1637c0bd2..017828e99 100644
--- a/vfs/dir.go
+++ b/vfs/dir.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/list"
 	"github.com/pkg/errors"
 )
 
@@ -179,7 +180,7 @@ func (d *Dir) _readDir() error {
 		}
 		fs.Debugf(d.path, "Re-reading directory (%v old)", age)
 	}
-	entries, err := fs.ListDirSorted(d.f, false, d.path)
+	entries, err := list.DirSorted(d.f, false, d.path)
 	if err == fs.ErrorDirNotFound {
 		// We treat directory not found as empty because we
 		// create directories on the fly
diff --git a/vfs/file.go b/vfs/file.go
index f3cd32ba6..2e9f1ad58 100644
--- a/vfs/file.go
+++ b/vfs/file.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"github.com/pkg/errors"
 )
 
@@ -355,7 +356,7 @@ func (f *File) VFS() *VFS {
 //
 // We ignore O_SYNC and O_EXCL
 func (f *File) Open(flags int) (fd Handle, err error) {
-	defer fs.Trace(f, "flags=%s", decodeOpenFlags(flags))("fd=%v, err=%v", &fd, &err)
+	defer log.Trace(f, "flags=%s", decodeOpenFlags(flags))("fd=%v, err=%v", &fd, &err)
 	var (
 		write    bool // if set need write support
 		read     bool // if set need read support
diff --git a/vfs/read.go b/vfs/read.go
index f046b3b35..a51eea52e 100644
--- a/vfs/read.go
+++ b/vfs/read.go
@@ -6,6 +6,8 @@ import (
 	"sync"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
@@ -14,7 +16,7 @@ type ReadFileHandle struct {
 	baseHandle
 	mu         sync.Mutex
 	closed     bool // set if handle has been closed
-	r          *fs.Account
+	r          *accounting.Account
 	o          fs.Object
 	readCalled bool  // set if read has been called
 	size       int64 // size of the object
@@ -22,7 +24,7 @@ type ReadFileHandle struct {
 	roffset    int64 // offset of Read() calls
 	noSeek     bool
 	file       *File
-	hash       *fs.MultiHasher
+	hash       *hash.MultiHasher
 	opened     bool
 }
 
@@ -35,10 +37,10 @@ var (
 )
 
 func newReadFileHandle(f *File, o fs.Object) (*ReadFileHandle, error) {
-	var hash *fs.MultiHasher
+	var mhash *hash.MultiHasher
 	var err error
 	if !f.d.vfs.Opt.NoChecksum {
-		hash, err = fs.NewMultiHasherTypes(o.Fs().Hashes())
+		mhash, err = hash.NewMultiHasherTypes(o.Fs().Hashes())
 		if err != nil {
 			fs.Errorf(o.Fs(), "newReadFileHandle hash error: %v", err)
 		}
@@ -48,7 +50,7 @@ func newReadFileHandle(f *File, o fs.Object) (*ReadFileHandle, error) {
 		o:      o,
 		noSeek: f.d.vfs.Opt.NoSeek,
 		file:   f,
-		hash:   hash,
+		hash:   mhash,
 		size:   o.Size(),
 	}
 	return fh, nil
@@ -64,9 +66,9 @@ func (fh *ReadFileHandle) openPending() (err error) {
 	if err != nil {
 		return err
 	}
-	fh.r = fs.NewAccount(r, fh.o).WithBuffer() // account the transfer
+	fh.r = accounting.NewAccount(r, fh.o).WithBuffer() // account the transfer
 	fh.opened = true
-	fs.Stats.Transferring(fh.o.Remote())
+	accounting.Stats.Transferring(fh.o.Remote())
 	return nil
 }
 
@@ -269,7 +271,7 @@ func (fh *ReadFileHandle) checkHash() error {
 		if err != nil {
 			return err
 		}
-		if !fs.HashEquals(dstSum, srcSum) {
+		if !hash.Equals(dstSum, srcSum) {
 			return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
 		}
 	}
@@ -322,7 +324,7 @@ func (fh *ReadFileHandle) close() error {
 	fh.closed = true
 
 	if fh.opened {
-		fs.Stats.DoneTransferring(fh.o.Remote(), true)
+		accounting.Stats.DoneTransferring(fh.o.Remote(), true)
 		// Close first so that we have hashes
 		err := fh.r.Close()
 		if err != nil {
diff --git a/vfs/read_write.go b/vfs/read_write.go
index c95e26381..ef2300243 100644
--- a/vfs/read_write.go
+++ b/vfs/read_write.go
@@ -7,6 +7,8 @@ import (
 	"sync"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
 )
 
@@ -84,7 +86,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 	if fh.flags&os.O_TRUNC == 0 && !truncate {
 		// Fetch the file if it hasn't changed
 		// FIXME retries
-		err = fs.CopyFile(fh.d.vfs.cache.f, fh.d.vfs.f, fh.remote, fh.remote)
+		err = operations.CopyFile(fh.d.vfs.cache.f, fh.d.vfs.f, fh.remote, fh.remote)
 		if err != nil {
 			// if the object wasn't found AND O_CREATE is set then...
 			cause := errors.Cause(err)
@@ -164,7 +166,7 @@ func (fh *RWFileHandle) Node() Node {
 // Note that we leave the file around in the cache on error conditions
 // to give the user a chance to recover it.
 func (fh *RWFileHandle) close() (err error) {
-	defer fs.Trace(fh.remote, "")("err=%v", &err)
+	defer log.Trace(fh.remote, "")("err=%v", &err)
 	if fh.closed {
 		return ECLOSED
 	}
@@ -224,9 +226,9 @@ func (fh *RWFileHandle) close() (err error) {
 	// Transfer the temp file to the remote
 	// FIXME retries
 	if fh.d.vfs.Opt.CacheMode < CacheModeFull {
-		err = fs.MoveFile(fh.d.vfs.f, fh.d.vfs.cache.f, fh.remote, fh.remote)
+		err = operations.MoveFile(fh.d.vfs.f, fh.d.vfs.cache.f, fh.remote, fh.remote)
 	} else {
-		err = fs.CopyFile(fh.d.vfs.f, fh.d.vfs.cache.f, fh.remote, fh.remote)
+		err = operations.CopyFile(fh.d.vfs.f, fh.d.vfs.cache.f, fh.remote, fh.remote)
 	}
 	if err != nil {
 		err = errors.Wrap(err, "failed to transfer file from cache to remote")
diff --git a/vfs/vfs.go b/vfs/vfs.go
index 4d20e89a9..5a491b273 100644
--- a/vfs/vfs.go
+++ b/vfs/vfs.go
@@ -27,6 +27,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/log"
 	"golang.org/x/net/context" // switch to "context" when we stop supporting go1.6
 )
 
@@ -256,7 +257,7 @@ func (vfs *VFS) FlushDirCache() {
 // WaitForWriters sleeps until all writers have finished or
 // time.Duration has elapsed
 func (vfs *VFS) WaitForWriters(timeout time.Duration) {
-	defer fs.Trace(nil, "timeout=%v", timeout)("")
+	defer log.Trace(nil, "timeout=%v", timeout)("")
 	const tickTime = 1 * time.Second
 	deadline := time.NewTimer(timeout)
 	defer deadline.Stop()
@@ -391,7 +392,7 @@ func decodeOpenFlags(flags int) string {
 
 // OpenFile a file according to the flags and perm provided
 func (vfs *VFS) OpenFile(name string, flags int, perm os.FileMode) (fd Handle, err error) {
-	defer fs.Trace(name, "flags=%s, perm=%v", decodeOpenFlags(flags), perm)("fd=%v, err=%v", &fd, &err)
+	defer log.Trace(name, "flags=%s, perm=%v", decodeOpenFlags(flags), perm)("fd=%v, err=%v", &fd, &err)
 	node, err := vfs.Stat(name)
 	if err != nil {
 		if err != ENOENT || flags&os.O_CREATE == 0 {
diff --git a/vfs/vfsflags/vfsflags.go b/vfs/vfsflags/vfsflags.go
index fdf3316a2..85c17a120 100644
--- a/vfs/vfsflags/vfsflags.go
+++ b/vfs/vfsflags/vfsflags.go
@@ -2,7 +2,7 @@
 package vfsflags
 
 import (
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
 	"github.com/ncw/rclone/vfs"
 	"github.com/spf13/pflag"
 )
@@ -13,15 +13,15 @@ var (
 )
 
 // AddFlags adds the non filing system specific flags to the command
-func AddFlags(flags *pflag.FlagSet) {
-	fs.BoolVarP(flags, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up).")
-	fs.BoolVarP(flags, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download.")
-	fs.BoolVarP(flags, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files.")
-	fs.DurationVarP(flags, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for.")
-	fs.DurationVarP(flags, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
-	fs.BoolVarP(flags, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only.")
-	fs.FlagsVarP(flags, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full")
-	fs.DurationVarP(flags, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects.")
-	fs.DurationVarP(flags, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache.")
-	platformFlags(flags)
+func AddFlags(flagSet *pflag.FlagSet) {
+	flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up).")
+	flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download.")
+	flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files.")
+	flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for.")
+	flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
+	flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only.")
+	flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full")
+	flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects.")
+	flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache.")
+	platformFlags(flagSet)
 }
diff --git a/vfs/vfsflags/vfsflags_unix.go b/vfs/vfsflags/vfsflags_unix.go
index 044a55ed0..66adf03d3 100644
--- a/vfs/vfsflags/vfsflags_unix.go
+++ b/vfs/vfsflags/vfsflags_unix.go
@@ -3,18 +3,18 @@
 package vfsflags
 
 import (
-	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/flags"
 	"github.com/spf13/pflag"
 	"golang.org/x/sys/unix"
 )
 
 // add any extra platform specific flags
-func platformFlags(flags *pflag.FlagSet) {
-	fs.IntVarP(flags, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem.")
+func platformFlags(flagSet *pflag.FlagSet) {
+	flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem.")
 	Opt.Umask = unix.Umask(0) // read the umask
 	unix.Umask(Opt.Umask)     // set it back to what it was
 	Opt.UID = uint32(unix.Geteuid())
 	Opt.GID = uint32(unix.Getegid())
-	fs.Uint32VarP(flags, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem.")
-	fs.Uint32VarP(flags, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem.")
+	flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem.")
+	flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem.")
 }
diff --git a/vfs/write.go b/vfs/write.go
index 897a8cc1f..8c1b47156 100644
--- a/vfs/write.go
+++ b/vfs/write.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 )
 
 // WriteFileHandle is an open for write handle on a File
@@ -64,7 +65,7 @@ func (fh *WriteFileHandle) openPending() (err error) {
 	pipeReader, fh.pipeWriter = io.Pipe()
 	go func() {
 		// NB Rcat deals with Stats.Transferring etc
-		o, err := fs.Rcat(fh.file.d.f, fh.remote, pipeReader, time.Now())
+		o, err := operations.Rcat(fh.file.d.f, fh.remote, pipeReader, time.Now())
 		if err != nil {
 			fs.Errorf(fh.remote, "WriteFileHandle.New Rcat failed: %v", err)
 		}