// Sync files and directories to and from local and remote object stores
//
// Nick Craig-Wood <nick@craig-wood.com>
package main

// FIXME only attach the remote flags when using a remote???
// would probably mean bringing all the flags in to here? Or define some flagsets in fs...

import (
	"fmt"
	"log"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"strings"
	"sync"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
	"github.com/spf13/pflag"

	"github.com/ncw/rclone/fs"
	_ "github.com/ncw/rclone/fs/all" // import all fs
)

// Globals
var (
	// Flags
	cpuProfile    = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
	memProfile    = pflag.String("memprofile", "", "Write memory profile to file")
	statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
	version       bool
	logFile       = pflag.StringP("log-file", "", "", "Log everything to this file")
	retries       = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
	dedupeMode    = fs.DeduplicateInteractive
)

var rootCmd = &cobra.Command{
	Use:   "rclone",
	Short: "Sync files and directories to and from local and remote object stores - " + fs.Version,
	Long: `
Rclone is a command line program to sync files and directories to and
from various cloud storage systems, such as:

* Google Drive
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Drive
* Microsoft OneDrive
* Hubic
* Backblaze B2
* Yandex Disk
* The local filesystem

Features

* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts

See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

* http://rclone.org/
`,
	Run: func(cmd *cobra.Command, args []string) {
		if version {
			showVersion()
			os.Exit(0)
		}
	},
}

func init() {
	rootCmd.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
	rootCmd.AddCommand(copyCmd, syncCmd, moveCmd, lsCmd, lsdCmd,
		lslCmd, md5sumCmd, sha1sumCmd, sizeCmd, mkdirCmd,
		rmdirCmd, purgeCmd, deleteCmd, checkCmd, dedupeCmd,
		genautocompleteCmd, gendocsCmd, configCmd, authorizeCmd,
		cleanupCmd, memtestCmd, versionCmd)
	dedupeCmd.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
	cobra.OnInitialize(initConfig)
}

func showVersion() {
	fmt.Printf("rclone %s\n", fs.Version)
}

// NewFsSrc creates a src Fs from a name
//
// This can point to a file
func NewFsSrc(remote string) fs.Fs {
	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
	if err != nil {
		fs.Stats.Error()
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	f, err := fsInfo.NewFs(configName, fsPath)
	if err == fs.ErrorIsFile {
		if !fs.Config.Filter.InActive() {
			fs.Stats.Error()
			log.Fatalf("Can't limit to single files when using filters: %v", remote)
		}
		// Limit transfers to this file
		err = fs.Config.Filter.AddFile(path.Base(fsPath))
		// Set --no-traverse as only one file
		fs.Config.NoTraverse = true
	}
	if err != nil {
		fs.Stats.Error()
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return f
}
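
// Example (illustrative only - "remote:" and the path below are made up):
//
//	f := NewFsSrc("remote:dir/file.txt")
//
// If dir/file.txt turns out to be a file rather than a directory, NewFs
// returns fs.ErrorIsFile together with an Fs for the enclosing directory;
// the code above then limits the filter to "file.txt" and sets --no-traverse
// so only that single file is transferred.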

// NewFsDst creates a dst Fs from a name
//
// This must point to a directory
func NewFsDst(remote string) fs.Fs {
	f, err := fs.NewFs(remote)
	if err != nil {
		fs.Stats.Error()
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return f
}

// Create a new src and dst fs from the arguments
func newFsSrcDst(args []string) (fs.Fs, fs.Fs) {
	fsrc, fdst := NewFsSrc(args[0]), NewFsDst(args[1])
	fs.CalculateModifyWindow(fdst, fsrc)
	return fsrc, fdst
}

// Create a new src fs from the arguments
func newFsSrc(args []string) fs.Fs {
	fsrc := NewFsSrc(args[0])
	fs.CalculateModifyWindow(fsrc)
	return fsrc
}

// Create a new dst fs from the arguments
//
// Dst fs-es can't point to single files
func newFsDst(args []string) fs.Fs {
	fdst := NewFsDst(args[0])
	fs.CalculateModifyWindow(fdst)
	return fdst
}

// run the function with stats and retries if required
func run(Retry bool, cmd *cobra.Command, f func() error) {
	var err error
	stopStats := startStats()
	for try := 1; try <= *retries; try++ {
		err = f()
		if !Retry || (err == nil && !fs.Stats.Errored()) {
			break
		}
		if fs.IsFatalError(err) {
			fs.Log(nil, "Fatal error received - not attempting retries")
			break
		}
		if fs.IsNoRetryError(err) {
			fs.Log(nil, "Can't retry this error - not attempting retries")
			break
		}
		if err != nil {
			fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
		} else {
			fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
		}
		if try < *retries {
			fs.Stats.ResetErrors()
		}
	}
	close(stopStats)
	if err != nil {
		log.Fatalf("Failed to %s: %v", cmd.Name(), err)
	}
	if !fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0 {
		fs.Log(nil, "%s", fs.Stats)
	}
	if fs.Config.Verbose {
		fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
	}
	if fs.Stats.Errored() {
		os.Exit(1)
	}
}
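
// Illustrative sketch of the retry behaviour (the error text is made up):
// with the default --retries 3, a failed first attempt logs something like
//
//	Attempt 1/3 failed with 2 errors and: <last error>
//
// then the error count is reset and f is run again. Fatal and no-retry
// errors break out of the loop straight away, and an error left over after
// the last attempt causes run to call log.Fatalf.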

// checkArgs checks there are enough arguments and prints a message if not
func checkArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
	if len(args) < MinArgs {
		_ = cmd.Usage()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
		os.Exit(1)
	} else if len(args) > MaxArgs {
		_ = cmd.Usage()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
		os.Exit(1)
	}
}
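
// For example (illustrative): "rclone copy remote:src" with only one
// argument prints the usage text followed by
//
//	Command copy needs 2 arguments minimum
//
// on stderr and exits with status 1.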

// startStats prints the stats every statsInterval
//
// It returns a channel which should be closed to stop the stats.
func startStats() chan struct{} {
	stopStats := make(chan struct{})
	if *statsInterval > 0 {
		go func() {
			ticker := time.NewTicker(*statsInterval)
			for {
				select {
				case <-ticker.C:
					fs.Stats.Log()
				case <-stopStats:
					ticker.Stop()
					return
				}
			}
		}()
	}
	return stopStats
}

// The commands
var copyCmd = &cobra.Command{
	Use:   "copy source:path dest:path",
	Short: `Copy files from source to dest, skipping already copied`,
	Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

Note that it is always the contents of the directory that is synced,
not the directory so when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.

If dest:path doesn't exist, it is created and the source:path contents
go there.

For example

    rclone copy source:sourcepath dest:destpath

Let's say there are two files in sourcepath

    sourcepath/one.txt
    sourcepath/two.txt

This copies them to

    destpath/one.txt
    destpath/two.txt

Not to

    destpath/sourcepath/one.txt
    destpath/sourcepath/two.txt

If you are familiar with ` + "`" + `rsync` + "`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.

See the ` + "`" + `--no-traverse` + "`" + ` option for controlling whether rclone lists
the destination directory or not.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(2, 2, cmd, args)
		fsrc, fdst := newFsSrcDst(args)
		run(true, cmd, func() error {
			return fs.CopyDir(fdst, fsrc)
		})
	},
}

var syncCmd = &cobra.Command{
	Use:   "sync source:path dest:path",
	Short: `Make source and dest identical, modifying destination only.`,
	Long: `
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary.

**Important**: Since this can cause data loss, test first with the
` + "`" + `--dry-run` + "`" + ` flag to see exactly what would be copied and deleted.

Note that files in the destination won't be deleted if there were any
errors at any point.

It is always the contents of the directory that is synced, not the
directory so when source:path is a directory, it's the contents of
source:path that are copied, not the directory name and contents. See
extended explanation in the ` + "`" + `copy` + "`" + ` command above if unsure.

If dest:path doesn't exist, it is created and the source:path contents
go there.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(2, 2, cmd, args)
		fsrc, fdst := newFsSrcDst(args)
		run(true, cmd, func() error {
			return fs.Sync(fdst, fsrc)
		})
	},
}

var moveCmd = &cobra.Command{
	Use:   "move source:path dest:path",
	Short: `Move files from source to dest.`,
	Long: `
Moves the contents of the source directory to the destination
directory. Rclone will error if the source and destination overlap.

If no filters are in use and if possible this will server side move
` + "`" + `source:path` + "`" + ` into ` + "`" + `dest:path` + "`" + `. After this ` + "`" + `source:path` + "`" + ` will no
longer exist.

Otherwise for each file in ` + "`" + `source:path` + "`" + ` selected by the filters (if
any) this will move it into ` + "`" + `dest:path` + "`" + `. If possible a server side
move will be used, otherwise it will copy it (server side if possible)
into ` + "`" + `dest:path` + "`" + ` then delete the original (if no errors on copy) in
` + "`" + `source:path` + "`" + `.

**Important**: Since this can cause data loss, test first with the
--dry-run flag.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(2, 2, cmd, args)
		fsrc, fdst := newFsSrcDst(args)
		run(true, cmd, func() error {
			return fs.MoveDir(fdst, fsrc)
		})
	},
}

var lsCmd = &cobra.Command{
	Use:   "ls remote:path",
	Short: `List all the objects in the path with size and path.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.List(fsrc, os.Stdout)
		})
	},
}

var lsdCmd = &cobra.Command{
	Use:   "lsd remote:path",
	Short: `List all directories/containers/buckets in the path.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.ListDir(fsrc, os.Stdout)
		})
	},
}

var lslCmd = &cobra.Command{
	Use:   "lsl remote:path",
	Short: `List all the objects in the path with modification time, size and path.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.ListLong(fsrc, os.Stdout)
		})
	},
}

var md5sumCmd = &cobra.Command{
	Use:   "md5sum remote:path",
	Short: `Produces an md5sum file for all the objects in the path.`,
	Long: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.Md5sum(fsrc, os.Stdout)
		})
	},
}

var sha1sumCmd = &cobra.Command{
	Use:   "sha1sum remote:path",
	Short: `Produces a sha1sum file for all the objects in the path.`,
	Long: `
Produces a sha1sum file for all the objects in the path. This
is in the same format as the standard sha1sum tool produces.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.Sha1sum(fsrc, os.Stdout)
		})
	},
}

var sizeCmd = &cobra.Command{
	Use:   "size remote:path",
	Short: `Prints the total size and number of objects in remote:path.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			objects, size, err := fs.Count(fsrc)
			if err != nil {
				return err
			}
			fmt.Printf("Total objects: %d\n", objects)
			fmt.Printf("Total size: %s (%d Bytes)\n", fs.SizeSuffix(size).Unit("Bytes"), size)
			return nil
		})
	},
}

var mkdirCmd = &cobra.Command{
	Use:   "mkdir remote:path",
	Short: `Make the path if it doesn't already exist.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fdst := newFsDst(args)
		run(true, cmd, func() error {
			return fs.Mkdir(fdst)
		})
	},
}

var rmdirCmd = &cobra.Command{
	Use:   "rmdir remote:path",
	Short: `Remove the path if empty.`,
	Long: `
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fdst := newFsDst(args)
		run(true, cmd, func() error {
			return fs.Rmdir(fdst)
		})
	},
}

var purgeCmd = &cobra.Command{
	Use:   "purge remote:path",
	Short: `Remove the path and all of its contents.`,
	Long: `
Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use ` + "`" + `delete` + "`" + ` if
you want to selectively delete files.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fdst := newFsDst(args)
		run(true, cmd, func() error {
			return fs.Purge(fdst)
		})
	},
}

var deleteCmd = &cobra.Command{
	Use:   "delete remote:path",
	Short: `Remove the contents of path.`,
	Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.

Eg delete all files bigger than 100MBytes

Check what would be deleted first (use either)

    rclone --min-size 100M lsl remote:path
    rclone --dry-run --min-size 100M delete remote:path

Then delete

    rclone --min-size 100M delete remote:path

That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(true, cmd, func() error {
			return fs.Delete(fsrc)
		})
	},
}

var checkCmd = &cobra.Command{
	Use:   "check source:path dest:path",
	Short: `Checks the files in the source and destination match.`,
	Long: `
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

` + "`" + `--size-only` + "`" + ` may be used to only compare the sizes, not the MD5SUMs.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(2, 2, cmd, args)
		fsrc, fdst := newFsSrcDst(args)
		run(false, cmd, func() error {
			return fs.Check(fdst, fsrc)
		})
	},
}

var dedupeCmd = &cobra.Command{
	Use:   "dedupe [mode] remote:path",
	Short: `Interactively find duplicate files and delete/rename them.`,
	Long: `
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.

The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.

Here is an example run.

Before - with duplicates

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    6048320 2016-03-05 16:23:11.775000000 one.txt
    564374 2016-03-05 16:23:06.731000000 one.txt
    6048320 2016-03-05 16:18:26.092000000 one.txt
    6048320 2016-03-05 16:22:46.185000000 two.txt
    1744073 2016-03-05 16:22:38.104000000 two.txt
    564374 2016-03-05 16:22:52.118000000 two.txt

Now the ` + "`" + `dedupe` + "`" + ` session

    $ rclone dedupe drive:dupes
    2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
    one.txt: Found 4 duplicates - deleting identical copies
    one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
    one.txt: 2 duplicates remain
    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> k
    Enter the number of the file to keep> 1
    one.txt: Deleted 1 extra copies
    two.txt: Found 3 duplicates - deleting identical copies
    two.txt: 3 duplicates remain
    1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> r
    two-1.txt: renamed from: two.txt
    two-2.txt: renamed from: two.txt
    two-3.txt: renamed from: two.txt

The result being

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    564374 2016-03-05 16:22:52.118000000 two-1.txt
    6048320 2016-03-05 16:22:46.185000000 two-2.txt
    1744073 2016-03-05 16:22:38.104000000 two-3.txt

Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value

* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.

For example to rename all the identically named photos in your Google Photos directory, do

    rclone dedupe --dedupe-mode rename "drive:Google Photos"

Or

    rclone dedupe rename "drive:Google Photos"
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 2, cmd, args)
		if len(args) > 1 {
			err := dedupeMode.Set(args[0])
			if err != nil {
				log.Fatal(err)
			}
			args = args[1:]
		}
		fdst := newFsSrc(args)
		run(false, cmd, func() error {
			return fs.Deduplicate(fdst, dedupeMode)
		})
	},
}

var configCmd = &cobra.Command{
	Use:   "config",
	Short: `Enter an interactive configuration session.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(0, 0, cmd, args)
		fs.EditConfig()
	},
}

var genautocompleteCmd = &cobra.Command{
	Use:   "genautocomplete [output_file]",
	Short: `Output bash completion script for rclone.`,
	Long: `
Generates a bash shell autocompletion script for rclone.

This writes to /etc/bash_completion.d/rclone by default so will
probably need to be run with sudo or as root, eg

    sudo rclone genautocomplete

Logout and login again to use the autocompletion scripts, or source
them directly

    . /etc/bash_completion

If you supply a command line argument the script will be written
there.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(0, 1, cmd, args)
		out := "/etc/bash_completion.d/rclone"
		if len(args) > 0 {
			out = args[0]
		}
		err := rootCmd.GenBashCompletionFile(out)
		if err != nil {
			log.Fatal(err)
		}
	},
}

const gendocFrontmatterTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`
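
// Illustrative example (the file name below is assumed, not taken from the
// code): gendocs fills this template with fmt.Sprintf, so a page generated
// as rclone_copy.md would get front matter roughly like
//
//	---
//	date: 2016-08-04T12:00:00+01:00
//	title: "rclone copy"
//	slug: rclone_copy
//	url: /commands/rclone_copy/
//	---
//
// where the date is whatever time.Now formats to in RFC3339.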

var gendocsCmd = &cobra.Command{
	Use:   "gendocs output_directory",
	Short: `Output markdown docs for rclone to the directory supplied.`,
	Long: `
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		checkArgs(1, 1, cmd, args)
		out := args[0]
		err := os.MkdirAll(out, 0777)
		if err != nil {
			return err
		}
		now := time.Now().Format(time.RFC3339)
		prepender := func(filename string) string {
			name := filepath.Base(filename)
			base := strings.TrimSuffix(name, path.Ext(name))
			url := "/commands/" + strings.ToLower(base) + "/"
			return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
		}
		linkHandler := func(name string) string {
			base := strings.TrimSuffix(name, path.Ext(name))
			return "/commands/" + strings.ToLower(base) + "/"
		}
		return doc.GenMarkdownTreeCustom(rootCmd, out, prepender, linkHandler)
	},
}

var authorizeCmd = &cobra.Command{
	Use:   "authorize",
	Short: `Remote authorization.`,
	Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 3, cmd, args)
		fs.Authorize(args)
	},
}

var cleanupCmd = &cobra.Command{
	Use:   "cleanup remote:path",
	Short: `Clean up the remote if possible`,
	Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(true, cmd, func() error {
			return fs.CleanUp(fsrc)
		})
	},
}

var memtestCmd = &cobra.Command{
	Use:    "memtest remote:path",
	Short:  `Load all the objects at remote:path and report memory stats.`,
	Hidden: true,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(1, 1, cmd, args)
		fsrc := newFsSrc(args)
		run(false, cmd, func() error {
			objects, _, err := fs.Count(fsrc)
			if err != nil {
				return err
			}
			objs := make([]fs.Object, 0, objects)
			var before, after runtime.MemStats
			runtime.GC()
			runtime.ReadMemStats(&before)
			var mu sync.Mutex
			err = fs.ListFn(fsrc, func(o fs.Object) {
				mu.Lock()
				objs = append(objs, o)
				mu.Unlock()
			})
			if err != nil {
				return err
			}
			runtime.GC()
			runtime.ReadMemStats(&after)
			usedMemory := after.Alloc - before.Alloc
			fs.Log(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
			fs.Log(nil, "System memory changed from %d to %d bytes, a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
			return nil
		})
	},
}

var versionCmd = &cobra.Command{
	Use:   "version",
	Short: `Show the version number.`,
	Run: func(cmd *cobra.Command, args []string) {
		checkArgs(0, 0, cmd, args)
		showVersion()
	},
}

// initConfig is run by cobra after initialising the flags
func initConfig() {
	// Log file output
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		_, err = f.Seek(0, os.SEEK_END)
		if err != nil {
			fs.ErrorLog(nil, "Failed to seek log file to end: %v", err)
		}
		log.SetOutput(f)
		fs.DebugLogger.SetOutput(f)
		redirectStderr(f)
	}

	// Load the rest of the config now we have started the logger
	fs.LoadConfig()

	// Write the args for debug purposes
	fs.Debug("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
		fs.Log(nil, "Creating CPU profile %q\n", *cpuProfile)
		f, err := os.Create(*cpuProfile)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	// Setup memory profiling if desired
	if *memProfile != "" {
		defer func() {
			fs.Log(nil, "Saving Memory profile %q\n", *memProfile)
			f, err := os.Create(*memProfile)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = f.Close()
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
		}()
	}
}

func main() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
	os.Exit(0)
}