
operations: make dedupe continue on errors like insufficientFilePermissions - fixes #3470

* Make dedupe on merge continue past errors like insufficientFilePermissions instead of aborting on the first one
* Sort the duplicate directories so parents come before children, removing the repeat-until-fixed loop
SezalAgrawal 2019-11-26 16:28:52 +05:30 committed by Nick Craig-Wood
parent 420ae905b5
commit c3751e9a50


@@ -211,12 +211,18 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "find duplicate dirs")
 	}
-	duplicateDirs := [][]fs.Directory{}
-	for _, ds := range dirs {
+	// make sure parents are before children
+	duplicateNames := []string{}
+	for name, ds := range dirs {
 		if len(ds) > 1 {
-			duplicateDirs = append(duplicateDirs, ds)
+			duplicateNames = append(duplicateNames, name)
 		}
 	}
+	sort.Strings(duplicateNames)
+	duplicateDirs := [][]fs.Directory{}
+	for _, name := range duplicateNames {
+		duplicateDirs = append(duplicateDirs, dirs[name])
+	}
 	return duplicateDirs, nil
 }
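
Why sorting the names is enough to put parents first: in Go's lexicographic string order a directory path always sorts before the paths of its children, because the parent is a strict prefix of each child path. A minimal standalone sketch of that property (the path names are made up for illustration):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// hypothetical duplicate directory names
	names := []string{"photos/2019/raw", "photos", "photos/2019"}
	sort.Strings(names)
	// a parent sorts before its children, so a single pass can merge
	// each directory before any of its descendants is visited
	fmt.Println(names) // [photos photos/2019 photos/2019/raw]
}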
@@ -235,7 +241,8 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs.Directory) error {
 			fs.Infof(dirs[0], "Merging contents of duplicate directories")
 			err := mergeDirs(ctx, dirs)
 			if err != nil {
-				return errors.Wrap(err, "merge duplicate dirs")
+				err = fs.CountError(err)
+				fs.Errorf(nil, "merge duplicate dirs: %v", err)
 			}
 		} else {
 			fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
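
Instead of returning on the first failed merge, the error is now counted (so the final exit status still reflects it) and logged, and the loop moves on to the next set of duplicates. A rough standalone sketch of this count-and-continue pattern, with a plain counter standing in for rclone's fs.CountError accounting:

package main

import (
	"errors"
	"fmt"
)

var errCount int

// countError notes a non-fatal error and hands it back for logging,
// loosely mimicking what fs.CountError does in rclone
func countError(err error) error {
	if err != nil {
		errCount++
	}
	return err
}

// merge is a stand-in that fails for one directory
func merge(name string) error {
	if name == "dirB" {
		return errors.New("insufficientFilePermissions")
	}
	return nil
}

func main() {
	for _, dir := range []string{"dirA", "dirB", "dirC"} {
		if err := merge(dir); err != nil {
			// log and continue instead of returning early
			fmt.Printf("merge duplicate dirs %s: %v\n", dir, countError(err))
		}
	}
	fmt.Printf("finished with %d error(s)\n", errCount)
}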
@@ -251,23 +258,16 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs.Directory) error {
 func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
 	fs.Infof(f, "Looking for duplicates using %v mode.", mode)
 
-	// Find duplicate directories first and fix them - repeat
-	// until all fixed
-	for {
-		duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
-		if err != nil {
-			return err
-		}
-		if len(duplicateDirs) == 0 {
-			break
-		}
+	// Find duplicate directories first and fix them
+	duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
+	if err != nil {
+		return err
+	}
+	if len(duplicateDirs) != 0 {
 		err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
 		if err != nil {
 			return err
 		}
-		if fs.Config.DryRun {
-			break
-		}
 	}
 
 	// find a hash to use
@@ -275,7 +275,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
 	// Now find duplicate files
 	files := map[string][]fs.Object{}
-	err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
+	err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
 		entries.ForObject(func(o fs.Object) {
 			remote := o.Remote()
 			files[remote] = append(files[remote], o)
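
The last hunk is a knock-on effect of removing the loop: duplicateDirs and err now live at function scope, so the later walk.ListR call must assign with = rather than redeclare with :=. A tiny sketch of the compile error this avoids (names are illustrative, not from rclone):

package main

import "fmt"

func find() ([]string, error) {
	return []string{"a"}, nil
}

func main() {
	dirs, err := find() // declares dirs and err
	fmt.Println(dirs, err)

	// a second `dirs, err := find()` in the same scope would fail to
	// compile: "no new variables on left side of :="
	dirs, err = find() // so assign with = instead
	fmt.Println(dirs, err)
}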