// Package drive interfaces with the Google Drive object storage system
package drive

// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
// * can have directory loops
// * files with / in name

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"mime"
	"net/http"
	"net/url"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"text/template"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	drive_v2 "google.golang.org/api/drive/v2"
	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// Constants
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder"
	shortcutMimeType            = "application/vnd.google-apps.shortcut"
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	defaultMinSleep             = fs.Duration(100 * time.Millisecond)
	defaultBurst                = 100
	defaultExportExtensions     = "docx,xlsx,pptx,svg"
	scopePrefix                 = "https://www.googleapis.com/auth/"
	defaultScope                = "drive"
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	minChunkSize     = 256 * fs.KibiByte
	defaultChunkSize = 8 * fs.MebiByte
	partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
	listRGrouping    = 50   // number of IDs to search at once when using ListR
	listRInputBuffer = 1000 // size of input buffer when using ListR
)

// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{scopePrefix + "drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
	_mimeTypeToExtensionDuplicates = map[string]string{
		"application/x-vnd.oasis.opendocument.presentation": ".odp",
		"application/x-vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/x-vnd.oasis.opendocument.text":         ".odt",
		"image/jpg":   ".jpg",
		"image/x-bmp": ".bmp",
		"image/x-png": ".png",
		"text/rtf":    ".rtf",
	}
	_mimeTypeToExtension = map[string]string{
		"application/epub+zip":     ".epub",
		"application/json":         ".json",
		"application/msword":       ".doc",
		"application/pdf":          ".pdf",
		"application/rtf":          ".rtf",
		"application/vnd.ms-excel": ".xls",
		"application/vnd.oasis.opendocument.presentation":                          ".odp",
		"application/vnd.oasis.opendocument.spreadsheet":                            ".ods",
		"application/vnd.oasis.opendocument.text":                                   ".odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         ".xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   ".docx",
		"application/x-msmetafile":  ".wmf",
		"application/zip":           ".zip",
		"image/bmp":                 ".bmp",
		"image/jpeg":                ".jpg",
		"image/pjpeg":               ".pjpeg",
		"image/png":                 ".png",
		"image/svg+xml":             ".svg",
		"text/csv":                  ".csv",
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
	}
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
		"application/x-link-html":    ".link.html",
		"application/x-link-url":     ".url",
		"application/x-link-webloc":  ".webloc",
	}
	_mimeTypeCustomTransform = map[string]string{
		"application/vnd.google-apps.script+json": "application/json",
	}
	fetchFormatsOnce sync.Once                     // make sure we fetch the export/import formats only once
	_exportFormats   map[string][]string           // allowed export MIME type conversions
	_importFormats   map[string][]string           // allowed import MIME type conversions
	templatesOnce    sync.Once                     // parse link templates only once
	_linkTemplates   map[string]*template.Template // available link types
)

// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
	if scopesString == "" {
		scopesString = defaultScope
	}
	for _, scope := range strings.Split(scopesString, ",") {
		scope = strings.TrimSpace(scope)
		scopes = append(scopes, scopePrefix+scope)
	}
	return scopes
}

// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
	for _, scope := range scopes {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}
	}
	return false
}
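
// A minimal sketch of how the two scope helpers above compose (illustrative only):
//
//	scopes := driveScopes("drive.readonly,drive.appfolder")
//	// scopes == []string{
//	//	"https://www.googleapis.com/auth/drive.readonly",
//	//	"https://www.googleapis.com/auth/drive.appfolder",
//	// }
//	driveScopesContainsAppFolder(scopes) // == true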

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "drive",
		Description: "Google Drive",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Config: func(name string, m configmap.Mapper) {
			ctx := context.TODO()
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
			if err != nil {
				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
				return
			}

			// Fill in the scopes
			driveConfig.Scopes = driveScopes(opt.Scope)

			// Set the root_folder_id if using drive.appfolder
			if driveScopesContainsAppFolder(driveConfig.Scopes) {
				m.Set("root_folder_id", "appDataFolder")
			}

			if opt.ServiceAccountFile == "" {
				err = oauthutil.Config("drive", name, m, driveConfig, nil)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
				}
			}
			err = configTeamDrive(ctx, opt, m, name)
			if err != nil {
				log.Fatalf("Failed to configure team drive: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nSetting your own is recommended.",
		}, {
			Name: "scope",
			Help: "Scope that rclone should use when requesting access from drive.",
			Examples: []fs.OptionExample{{
				Value: "drive",
				Help:  "Full access all files, excluding Application Data Folder.",
			}, {
				Value: "drive.readonly",
				Help:  "Read-only access to file metadata and file contents.",
			}, {
				Value: "drive.file",
				Help:  "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
			}, {
				Value: "drive.appfolder",
				Help:  "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
			}, {
				Value: "drive.metadata.readonly",
				Help:  "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
			}},
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder
Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.

Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login." + env.ShellExpandHelp,
		}, {
			Name:     "service_account_credentials",
			Help:     "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "team_drive",
			Help:     "ID of the Team Drive",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "auth_owner_only",
			Default:  false,
			Help:     "Only consider files owned by the authenticated user.",
			Advanced: true,
		}, {
			Name:     "use_trash",
			Default:  true,
			Help:     "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
			Advanced: true,
		}, {
			Name:     "skip_gdocs",
			Default:  false,
			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
			Advanced: true,
		}, {
			Name:    "skip_checksum_gphotos",
			Default: false,
			Help: `Skip MD5 checksum on Google photos and videos only.

Use this if you get checksum errors when transferring Google photos or
videos.

Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.

Google photos are identified by being in the "photos" space.

Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
			Advanced: true,
		}, {
			Name:    "shared_with_me",
			Default: false,
			Help: `Only show files that are shared with me.

Instructs rclone to operate on your "Shared with me" folder (where
Google Drive lets you access the files and folders others have shared
with you).

This works both with the "list" (lsd, lsl, etc) and the "copy"
commands (copy, sync, etc), and with all other commands too.`,
			Advanced: true,
		}, {
			Name:     "trashed_only",
			Default:  false,
			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
			Advanced: true,
		}, {
			Name:     "formats",
			Default:  "",
			Help:     "Deprecated: see export_formats",
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "export_formats",
			Default:  defaultExportExtensions,
			Help:     "Comma separated list of preferred formats for downloading Google docs.",
			Advanced: true,
		}, {
			Name:     "import_formats",
			Default:  "",
			Help:     "Comma separated list of preferred formats for uploading Google docs.",
			Advanced: true,
		}, {
			Name:     "allow_import_name_change",
			Default:  false,
			Help:     "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
			Advanced: true,
		}, {
			Name:    "use_created_date",
			Default: false,
			Help: `Use file created date instead of modified date.

Useful when downloading data and you want the creation date used in
place of the last modified date.

**WARNING**: This flag may have some unexpected consequences.

When uploading to your drive all files will be overwritten unless they
haven't been modified since their creation. And the inverse will occur
while downloading. This side effect can be avoided by using the
"--checksum" flag.

This feature was implemented to retain photos capture date as recorded
by google photos. You will first need to check the "Create a Google
Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:    "use_shared_date",
			Default: false,
			Help: `Use date file was shared instead of modified date.

Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.

If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "list_chunk",
			Default:  1000,
			Help:     "Size of listing chunk 100-1000. 0 to disable.",
			Advanced: true,
		}, {
			Name:    "impersonate",
			Default: "",
			Help: `Impersonate this user when using a service account.

Note that if this is used then "root_folder_id" will be ignored.
`,
			Advanced: true,
		}, {
			Name:    "alternate_export",
			Default: false,
			Help: `Use alternate export URLs for google documents export.

If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.

See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
			Advanced: true,
		}, {
			Name:     "upload_cutoff",
			Default:  defaultChunkSize,
			Help:     "Cutoff for switching to chunked upload",
			Advanced: true,
		}, {
			Name:    "chunk_size",
			Default: defaultChunkSize,
			Help: `Upload chunk size. Must be a power of 2 >= 256k.

Making this larger will improve performance, but note that each chunk
is buffered in memory, one per transfer.

Reducing this will reduce memory usage but decrease performance.`,
			Advanced: true,
		}, {
			Name:    "acknowledge_abuse",
			Default: false,
			Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.

If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.`,
			Advanced: true,
		}, {
			Name:     "keep_revision_forever",
			Default:  false,
			Help:     "Keep new head revision of each file forever.",
			Advanced: true,
		}, {
			Name:    "size_as_quota",
			Default: false,
			Help: `Show sizes as storage quota usage, not actual size.

Show the size of a file as the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.

**WARNING**: This flag may have some unexpected consequences.

It is not recommended to set this flag in your config - the
recommended usage is using the flag form --drive-size-as-quota when
doing rclone ls/lsl/lsf/lsjson/etc only.

If you do use this flag for syncing (not recommended) then you will
need to use --ignore-size also.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "v2_download_min_size",
			Default:  fs.SizeSuffix(-1),
			Help:     "If Objects are greater, use drive v2 API to download.",
			Advanced: true,
		}, {
			Name:     "pacer_min_sleep",
			Default:  defaultMinSleep,
			Help:     "Minimum time to sleep between API calls.",
			Advanced: true,
		}, {
			Name:     "pacer_burst",
			Default:  defaultBurst,
			Help:     "Number of API calls to allow without sleeping.",
			Advanced: true,
		}, {
			Name:    "server_side_across_configs",
			Default: false,
			Help: `Allow server side operations (eg copy) to work across different drive configs.

This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
			Advanced: true,
		}, {
			Name:    "disable_http2",
			Default: true,
			Help: `Disable drive using http2

There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.

See: https://github.com/rclone/rclone/issues/3631
`,
			Advanced: true,
		}, {
			Name:    "stop_on_upload_limit",
			Default: false,
			Help: `Make upload limit errors be fatal

At the time of writing it is only possible to upload 750 GB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.

Note that this detection is relying on error message strings which
Google don't document so it may break in the future.

See: https://github.com/rclone/rclone/issues/3857
`,
			Advanced: true,
		}, {
			Name: "skip_shortcuts",
			Help: `If set skip shortcut files

Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
			Advanced: true,
			Default:  false,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			// Don't encode / as it's a valid name character in drive.
			Default: encoder.EncodeInvalidUtf8,
		}},
	})

	// register duplicate MIME types first
	// this allows them to be used with mime.ExtensionsByType() but
	// mime.TypeByExtension() will return the later registered MIME type
	for _, m := range []map[string]string{
		_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
	} {
		for mimeType, extension := range m {
			if err := mime.AddExtensionType(extension, mimeType); err != nil {
				log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
			}
		}
	}
}

// Options defines the configuration for this backend
type Options struct {
	Scope                     string               `config:"scope"`
	RootFolderID              string               `config:"root_folder_id"`
	ServiceAccountFile        string               `config:"service_account_file"`
	ServiceAccountCredentials string               `config:"service_account_credentials"`
	TeamDriveID               string               `config:"team_drive"`
	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
	UseTrash                  bool                 `config:"use_trash"`
	SkipGdocs                 bool                 `config:"skip_gdocs"`
	SkipChecksumGphotos       bool                 `config:"skip_checksum_gphotos"`
	SharedWithMe              bool                 `config:"shared_with_me"`
	TrashedOnly               bool                 `config:"trashed_only"`
	Extensions                string               `config:"formats"`
	ExportExtensions          string               `config:"export_formats"`
	ImportExtensions          string               `config:"import_formats"`
	AllowImportNameChange     bool                 `config:"allow_import_name_change"`
	UseCreatedDate            bool                 `config:"use_created_date"`
	UseSharedDate             bool                 `config:"use_shared_date"`
	ListChunk                 int64                `config:"list_chunk"`
	Impersonate               string               `config:"impersonate"`
	AlternateExport           bool                 `config:"alternate_export"`
	UploadCutoff              fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize                 fs.SizeSuffix        `config:"chunk_size"`
	AcknowledgeAbuse          bool                 `config:"acknowledge_abuse"`
	KeepRevisionForever       bool                 `config:"keep_revision_forever"`
	SizeAsQuota               bool                 `config:"size_as_quota"`
	V2DownloadMinSize         fs.SizeSuffix        `config:"v2_download_min_size"`
	PacerMinSleep             fs.Duration          `config:"pacer_min_sleep"`
	PacerBurst                int                  `config:"pacer_burst"`
	ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
	DisableHTTP2              bool                 `config:"disable_http2"`
	StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
	SkipShortcuts             bool                 `config:"skip_shortcuts"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote drive server
type Fs struct {
	name             string             // name of this remote
	root             string             // the path we are working on
	opt              Options            // parsed options
	features         *fs.Features       // optional features
	svc              *drive.Service     // the connection to the drive server
	v2Svc            *drive_v2.Service  // used to create download links for the v2 api
	client           *http.Client       // authorized client
	rootFolderID     string             // the id of the root folder
	dirCache         *dircache.DirCache // Map of directory path to directory id
	pacer            *fs.Pacer          // To pace the API calls
	exportExtensions []string           // preferred extensions to download docs
	importMimeTypes  []string           // MIME types to convert to docs
	isTeamDrive      bool               // true if this is a team drive
	fileFields       googleapi.Field    // fields to fetch file info with
	m                configmap.Mapper
	grouping         int32               // number of IDs to search at once in ListR - read with atomic
	listRmu          *sync.Mutex         // protects listRempties
	listRempties     map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
}

type baseObject struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	id           string // Drive Id of this object
	modifiedDate string // RFC3339 time it was last modified
	mimeType     string // The object MIME type
	bytes        int64  // size of the object
	parents      int    // number of parents
}
type documentObject struct {
	baseObject
	url              string // Download URL of this object
	documentMimeType string // the original document MIME type
	extLen           int    // The length of the added export extension
}
type linkObject struct {
	baseObject
	content []byte // The file content generated by a link template
	extLen  int    // The length of the added export extension
}

// Object describes a drive object
type Object struct {
	baseObject
	url        string // Download URL of this object
	md5sum     string // md5sum of the object
	v2Download bool   // generate v2 download link ondemand
}
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Google drive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(err error) (bool, error) {
	if err == nil {
		return false, nil
	}
	if fserrors.ShouldRetry(err) {
		return true, err
	}
	switch gerr := err.(type) {
	case *googleapi.Error:
		if gerr.Code >= 500 && gerr.Code < 600 {
			// All 5xx errors should be retried
			return true, err
		}
		if len(gerr.Errors) > 0 {
			reason := gerr.Errors[0].Reason
			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
				if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
					fs.Errorf(f, "Received upload limit error: %v", err)
					return false, fserrors.FatalError(err)
				}
				return true, err
			} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
				fs.Errorf(f, "Received team drive file limit error: %v", err)
				return false, fserrors.FatalError(err)
			}
		}
	}
	return false, err
}
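
// shouldRetry is meant to be used from inside a pacer callback, as the other
// methods in this file do; a minimal sketch (id is an illustrative variable):
//
//	err = f.pacer.Call(func() (bool, error) {
//		info, err = f.svc.Files.Get(id).Do()
//		return f.shouldRetry(err)
//	})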

// parseDrivePath parses a drive 'url'
func parseDrivePath(path string) (root string, err error) {
	root = strings.Trim(path, "/")
	return
}

// User function to process a File item from list
//
// Should return true to finish processing
type listFn func(*drive.File) bool

func containsString(slice []string, s string) bool {
	for _, e := range slice {
		if e == s {
			return true
		}
	}
	return false
}

// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
	err = f.pacer.CallNoRetry(func() (bool, error) {
		info, err = f.svc.Files.Get(ID).
			Fields(fields).
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	return info, err
}

// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
	info, err := f.getFile("root", "id")
	if err != nil {
		return "", errors.Wrap(err, "couldn't find root directory ID")
	}
	return info.Id, nil
}

// Lists the directory required, calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
	var query []string
	if !includeAll {
		q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
		if f.opt.TrashedOnly {
			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
		}
		query = append(query, q)
	}

	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
	// If we need to list file inside those shared folders, we must search it without sharedWithMe
	parentsQuery := bytes.NewBufferString("(")
	for _, dirID := range dirIDs {
		if dirID == "" {
			continue
		}
		if parentsQuery.Len() > 1 {
			_, _ = parentsQuery.WriteString(" or ")
		}
		if f.opt.SharedWithMe && dirID == f.rootFolderID {
			_, _ = parentsQuery.WriteString("sharedWithMe=true")
		} else {
			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
		}
	}
	if parentsQuery.Len() > 1 {
		_ = parentsQuery.WriteByte(')')
		query = append(query, parentsQuery.String())
	}
	var stems []string
	if title != "" {
		searchTitle := f.opt.Enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
		if !directoriesOnly && !f.opt.SkipGdocs {
			// If the search title has an extension that is in the export extensions add a search
			// for the filename without the extension.
			// Assume that export extensions don't contain escape sequences.
			for _, ext := range f.exportExtensions {
				if strings.HasSuffix(searchTitle, ext) {
					stems = append(stems, title[:len(title)-len(ext)])
					_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
				}
			}
		}
		_ = titleQuery.WriteByte(')')
		query = append(query, titleQuery.String())
	}
	if directoriesOnly {
		query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
	}
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}
	list := f.svc.Files.List()
	if len(query) > 0 {
		list.Q(strings.Join(query, " and "))
		// fmt.Printf("list Query = %q\n", query)
	}
	if f.opt.ListChunk > 0 {
		list.PageSize(f.opt.ListChunk)
	}
	list.SupportsAllDrives(true)
	list.IncludeItemsFromAllDrives(true)
	if f.isTeamDrive {
		list.DriveId(f.opt.TeamDriveID)
		list.Corpora("drive")
	}
	// If using appDataFolder then need to add Spaces
	if f.rootFolderID == "appDataFolder" {
		list.Spaces("appDataFolder")
	}

	fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return false, errors.Wrap(err, "couldn't list directory")
		}
		if files.IncompleteSearch {
			fs.Errorf(f, "search result INCOMPLETE")
		}
		for _, item := range files.Files {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if isShortcut(item) {
				// ignore shortcuts if directed
				if f.opt.SkipShortcuts {
					continue
				}
				// skip file shortcuts if directory only
				if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
					continue
				}
				// skip directory shortcuts if file only
				if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
					continue
				}
				item, err = f.resolveShortcut(item)
				if err != nil {
					return false, errors.Wrap(err, "list")
				}
			}
			// Check the case of items is correct since
			// the `=` operator is case insensitive.
			if title != "" && title != item.Name {
				found := false
				for _, stem := range stems {
					if stem == item.Name {
						found = true
						break
					}
				}
				if !found {
					continue
				}
				_, exportName, _, _ := f.findExportFormat(item)
				if exportName == "" || exportName != title {
					continue
				}
			}
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return
}
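
// A worked example of the query built above, with illustrative values: listing
// files named "report.docx" in the directory with ID "abc123", trash excluded,
// produces
//
//	trashed=false and ('abc123' in parents) and (name='report.docx' or name='report')
//
// where the second name term is added because ".docx" is one of the export
// extensions, so an exported google doc may match on the stem.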

// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
	switch {
	case x == 0:
		return true
	case x < 0:
		return false
	default:
		return (x & (x - 1)) == 0
	}
}
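
// The x&(x-1) expression clears the lowest set bit, so it is zero exactly when
// at most one bit is set: isPowerOfTwo(4096) == true, isPowerOfTwo(4095) == false.
// Zero is accepted explicitly by the first case.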

// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
	if mimeTypeIn == "" {
		return ""
	}
	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
	if err != nil {
		return mimeTypeIn
	}
	mimeTypeOut := mimeTypeIn
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		mimeTypeOut = mime.FormatMediaType(mediaType, param)
	}
	if mimeTypeOut == "" {
		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
	}
	return mimeTypeOut
}
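
// For example (the charset handling only applies to text/* types):
//
//	fixMimeType("text/html")             // "text/html; charset=utf-8"
//	fixMimeType("text/html; charset=x")  // unchanged, charset already present
//	fixMimeType("application/pdf")       // unchanged, not a text/* type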

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
		for i, mt := range v {
			v[i] = fixMimeType(mt)
		}
		out[fixMimeType(k)] = v
	}
	return out
}

func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/x-link-")
}

// parseExtensions parses a list of comma separated extensions
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
	for _, extensionText := range extensionsIn {
		for _, extension := range strings.Split(extensionText, ",") {
			extension = strings.ToLower(strings.TrimSpace(extension))
			if extension == "" {
				continue
			}
			if len(extension) > 0 && extension[0] != '.' {
				extension = "." + extension
			}
			mt := mime.TypeByExtension(extension)
			if mt == "" {
				return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
			}
			if !containsString(extensions, extension) {
				extensions = append(extensions, extension)
				mimeTypes = append(mimeTypes, mt)
			}
		}
	}
	return
}
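
// A minimal sketch of parseExtensions in use (the MIME types are those
// registered in init above):
//
//	exts, mimeTypes, err := parseExtensions("docx, pdf", "docx")
//	// exts == []string{".docx", ".pdf"} - the duplicate "docx" is dropped
//	// mimeTypes[1] == "application/pdf"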

// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
	// Stop if we are running non-interactive config
	if fs.Config.AutoConfirm {
		return nil
	}
	if opt.TeamDriveID == "" {
		fmt.Printf("Configure this as a team drive?\n")
	} else {
		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
	}
	if !config.Confirm(false) {
		return nil
	}
	client, err := createOAuthClient(opt, name, m)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to create oauth client")
	}
	svc, err := drive.New(client)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to make drive client")
	}
	fmt.Printf("Fetching team drive list...\n")
	var driveIDs, driveNames []string
	listTeamDrives := svc.Teamdrives.List().PageSize(100)
	listFailed := false
	var defaultFs Fs // default Fs with default Options
	for {
		var teamDrives *drive.TeamDriveList
		err = newPacer(opt).Call(func() (bool, error) {
			teamDrives, err = listTeamDrives.Context(ctx).Do()
			return defaultFs.shouldRetry(err)
		})
		if err != nil {
			fmt.Printf("Listing team drives failed: %v\n", err)
			listFailed = true
			break
		}
		for _, drive := range teamDrives.TeamDrives {
			driveIDs = append(driveIDs, drive.Id)
			driveNames = append(driveNames, drive.Name)
		}
		if teamDrives.NextPageToken == "" {
			break
		}
		listTeamDrives.PageToken(teamDrives.NextPageToken)
	}
	var driveID string
	if !listFailed && len(driveIDs) == 0 {
		fmt.Printf("No team drives found in your account")
	} else {
		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
	}
	m.Set("team_drive", driveID)
	opt.TeamDriveID = driveID
	return nil
}

// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}

// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
		if opt.DisableHTTP2 {
			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
		}
	})
	return &http.Client{
		Transport: t,
	}
}
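
// Assigning a non-nil, empty TLSNextProto map is the documented net/http way
// to disable HTTP/2 on a transport: with no registered protocol upgrade
// functions the client never negotiates h2 and falls back to HTTP/1.1.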

func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
	scopes := driveScopes(opt.Scope)
	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
	}
	if opt.Impersonate != "" {
		conf.Subject = opt.Impersonate
	}
	ctxWithSpecialClient := oauthutil.Context(getClient(opt))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
	var oAuthClient *http.Client
	var err error

	// try loading service account credentials from env variable, then from a file
	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed to create oauth client from service account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
		if err != nil {
			return nil, errors.Wrap(err, "failed to create oauth client")
		}
	}
	return oAuthClient, nil
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if !isPowerOfTwo(int64(cs)) {
		return errors.Errorf("%v isn't a power of two", cs)
	}
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}
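
// For example, given the constants defined above:
//
//	checkUploadChunkSize(8 * fs.MebiByte)   // nil - the default is valid
//	checkUploadChunkSize(300 * fs.KibiByte) // error - not a power of two
//	checkUploadChunkSize(128 * fs.KibiByte) // error - less than minChunkSize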

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "drive: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "drive: chunk size")
	}

	oAuthClient, err := createOAuthClient(opt, name, m)
	if err != nil {
		return nil, errors.Wrap(err, "drive: failed when making oauth client")
	}

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		pacer:        newPacer(opt),
		m:            m,
		grouping:     listRGrouping,
		listRmu:      new(sync.Mutex),
		listRempties: make(map[string]struct{}),
	}
	f.isTeamDrive = opt.TeamDriveID != ""
	f.fileFields = f.getFileFields()
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Drive client")
	}
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}

	// If impersonating warn about root_folder_id if set and unset it
	//
	// This is because rclone v1.51 and v1.52 cached root_folder_id when
	// using impersonate which they shouldn't have done. It is possible
	// someone is using impersonate and root_folder_id in which case this
	// breaks their workflow. There isn't an easy way around that.
	if opt.RootFolderID != "" && opt.Impersonate != "" {
		fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
		opt.RootFolderID = ""
	}

	// set root folder for a team drive or query the user root folder
	if opt.RootFolderID != "" {
		// override root folder if set or cached in the config and not impersonating
		f.rootFolderID = opt.RootFolderID
	} else if f.isTeamDrive {
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		// Look up the root ID and cache it in the config
		rootID, err := f.getRootID()
		if err != nil {
			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
				// 404 means that this scope does not have permission to get the
				// root so just use "root"
				rootID = "root"
			} else {
				return nil, err
			}
		}
		f.rootFolderID = rootID
		// Don't cache the root folder ID if impersonating
		if opt.Impersonate == "" {
			m.Set("root_folder_id", rootID)
		}
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)

	// Parse extensions
	if opt.Extensions != "" {
		if opt.ExportExtensions != defaultExportExtensions {
			return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
		}
		opt.Extensions, opt.ExportExtensions = "", opt.Extensions
	}
	f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
	if err != nil {
		return nil, err
	}
	_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
	if err != nil {
		return nil, err
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			// unable to list folder so return old f
			return f, nil
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}

func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
	modifiedDate := info.ModifiedTime
	if f.opt.UseCreatedDate {
		modifiedDate = info.CreatedTime
	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
		modifiedDate = info.SharedWithMeTime
	}
	size := info.Size
	if f.opt.SizeAsQuota {
		size = info.QuotaBytesUsed
	}
	return baseObject{
		fs:           f,
		remote:       remote,
		id:           info.Id,
		modifiedDate: modifiedDate,
		mimeType:     info.MimeType,
		bytes:        size,
		parents:      len(info.Parents),
	}
}

// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
	fields = partialFields
	if f.opt.AuthOwnerOnly {
		fields += ",owners"
	}
	if f.opt.UseSharedDate {
		fields += ",sharedWithMeTime"
	}
	if f.opt.SkipChecksumGphotos {
		fields += ",spaces"
	}
	if f.opt.SizeAsQuota {
		fields += ",quotaBytesUsed"
	}
	return fields
}
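
// For example, with only --drive-use-shared-date set this returns
// partialFields + ",sharedWithMeTime"; each option appends only the extra
// field it needs so that listings stay as small as possible.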

// newRegularObject creates an fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
	if f.opt.SkipChecksumGphotos {
		for _, space := range info.Spaces {
			if space == "photos" {
				info.Md5Checksum = ""
				break
			}
		}
	}
	return &Object{
		baseObject: f.newBaseObject(remote, info),
		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
		md5sum:     strings.ToLower(info.Md5Checksum),
		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
	}
}

// newDocumentObject creates an fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	mediaType, _, err := mime.ParseMediaType(exportMimeType)
	if err != nil {
		return nil, err
	}
	id := actualID(info.Id)
	url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
	if f.opt.AlternateExport {
		switch info.MimeType {
		case "application/vnd.google-apps.drawing":
			url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
		case "application/vnd.google-apps.document":
			url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
		case "application/vnd.google-apps.spreadsheet":
			url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
		case "application/vnd.google-apps.presentation":
			url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
		}
	}
	baseObject := f.newBaseObject(remote+extension, info)
	baseObject.bytes = -1
	baseObject.mimeType = exportMimeType
	return &documentObject{
		baseObject:       baseObject,
		url:              url,
		documentMimeType: info.MimeType,
		extLen:           len(extension),
	}, nil
}

// newLinkObject creates an fs.Object that represents a link to a google docs drive.File
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	t := linkTemplate(exportMimeType)
	if t == nil {
		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
	}
	var buf bytes.Buffer
	err := t.Execute(&buf, struct {
		URL, Title string
	}{
		info.WebViewLink, info.Name,
	})
	if err != nil {
		return nil, errors.Wrap(err, "executing template failed")
	}
	baseObject := f.newBaseObject(remote+extension, info)
	baseObject.bytes = int64(buf.Len())
	baseObject.mimeType = exportMimeType
	return &linkObject{
		baseObject: baseObject,
		content:    buf.Bytes(),
		extLen:     len(extension),
	}, nil
}

// newObjectWithInfo creates an fs.Object for any drive.File
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
	// If item has MD5 sum or a length it is a file stored on drive
	if info.Md5Checksum != "" || info.Size > 0 {
		return f.newRegularObject(remote, info), nil
	}
	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}

// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	remote string, info *drive.File,
	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing. However the drive.Item
	// will have been resolved so this will do nothing.
	info, err = f.resolveShortcut(info)
	if err != nil {
		return nil, errors.Wrap(err, "new object")
	}
	switch {
	case info.MimeType == driveFolderType:
		return nil, fs.ErrorNotAFile
	case info.MimeType == shortcutMimeType:
		// We can only get here if f.opt.SkipShortcuts is set
		// and not from a listing. This is unlikely.
		fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
		return nil, fs.ErrorObjectNotFound
	case info.Md5Checksum != "" || info.Size > 0:
		// If item has MD5 sum or a length it is a file stored on drive
		return f.newRegularObject(remote, info), nil
	case f.opt.SkipGdocs:
		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
		return nil, fs.ErrorObjectNotFound
	default:
		// If item MimeType is in the ExportFormats then it is a google doc
		if !isDocument {
			fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		if extension == "" {
			fs.Debugf(remote, "No export formats found for %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		if isLinkMimeType(exportMimeType) {
			return f.newLinkObject(remote, info, extension, exportMimeType)
		}
		return f.newDocumentObject(remote, info, extension, exportMimeType)
	}
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
	if err != nil {
		return nil, err
	}
	remote = remote[:len(remote)-len(extension)]
	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
	switch {
	case err != nil:
		return nil, err
	case obj == nil:
		return nil, fs.ErrorObjectNotFound
	default:
		return obj, nil
	}
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	pathID = actualID(pathID)
	found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			_, exportName, _, isDocument := f.findExportFormat(item)
			if exportName == leaf {
				pathIDOut = item.Id
				return true
			}
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			pathIDOut = item.Id
			return true
		}
		return false
	})
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	leaf = f.opt.Enc.FromStandardName(leaf)
	// Define the metadata for the directory we are going to create.
	pathID = actualID(pathID)
	createInfo := &drive.File{
		Name:        leaf,
		Description: leaf,
		MimeType:    driveFolderType,
		Parents:     []string{pathID},
	}
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Create(createInfo).
			Fields("id").
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return "", err
	}
	return info.Id, nil
}

// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
	for _, owner := range item.Owners {
		if owner.Me {
			return true
		}
	}
	return false
}

// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
	templatesOnce.Do(func() {
		_linkTemplates = map[string]*template.Template{
			"application/x-link-desktop": template.Must(
				template.New("application/x-link-desktop").Parse(desktopTemplate)),
			"application/x-link-html": template.Must(
				template.New("application/x-link-html").Parse(htmlTemplate)),
			"application/x-link-url": template.Must(
				template.New("application/x-link-url").Parse(urlTemplate)),
			"application/x-link-webloc": template.Must(
				template.New("application/x-link-webloc").Parse(weblocTemplate)),
		}
	})
	return _linkTemplates[mt]
}

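// fetchFormats fetches the import and export format maps from the
// drive API, once only, caching the results in _exportFormats and
// _importFormats for subsequent callers.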
func (f *Fs) fetchFormats() {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
		var err error
		err = f.pacer.Call(func() (bool, error) {
			about, err = f.svc.About.Get().
				Fields("exportFormats,importFormats").
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
			_exportFormats = map[string][]string{}
			_importFormats = map[string][]string{}
			return
		}
		_exportFormats = fixMimeTypeMap(about.ExportFormats)
		_importFormats = fixMimeTypeMap(about.ImportFormats)
	})
}

// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats() map[string][]string {
	f.fetchFormats()
	return _exportFormats
}

// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats() map[string][]string {
	f.fetchFormats()
	return _importFormats
}

// findExportFormatByMimeType works out the optimum export settings
// for the given MIME type.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
	extension, mimeType string, isDocument bool) {
	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
	if isDocument {
		for _, _extension := range f.exportExtensions {
			_mimeType := mime.TypeByExtension(_extension)
			if isLinkMimeType(_mimeType) {
				return _extension, _mimeType, true
			}
			for _, emt := range exportMimeTypes {
				if emt == _mimeType {
					return _extension, emt, true
				}
				if _mimeType == _mimeTypeCustomTransform[emt] {
					return _extension, emt, true
				}
			}
		}
	}
	// else return empty
	return "", "", isDocument
}

// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
	if extension != "" {
		filename = item.Name + extension
	}
	return
}

// findImportFormat finds the matching upload MIME type for a file.
// If the given MIME type is in importMimeTypes, the matching upload
// MIME type is returned.
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(mimeType string) string {
	mimeType = fixMimeType(mimeType)
	ifs := f.importFormats()
	for _, mt := range f.importMimeTypes {
		if mt == mimeType {
			importMimeTypes := ifs[mimeType]
			if l := len(importMimeTypes); l > 0 {
				if l > 1 {
					fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
				}
				return importMimeTypes[0]
			}
		}
	}
	return ""
}
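
// To sketch the effect with an assumed mapping (the real table comes
// from the drive API at runtime): with "docx" in the configured
// import formats, uploading "report.docx" would look up
//
//	findImportFormat("application/vnd.openxmlformats-officedocument.wordprocessingml.document")
//
// and, if the API maps that MIME type to
// "application/vnd.google-apps.document", the file would be
// converted to a Google Doc on upload.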

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	directoryID = actualID(directoryID)
	var iErr error
	_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
		if err != nil {
			iErr = err
			return true
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
	}
	return entries, nil
}

// listREntry is a task to be executed by a listRRunner
type listREntry struct {
	id, path string
}

// listRSlices is a helper struct to sort two slices at once
type listRSlices struct {
	dirs  []string
	paths []string
}

func (s listRSlices) Sort() {
	sort.Sort(s)
}

func (s listRSlices) Len() int {
	return len(s.dirs)
}

func (s listRSlices) Swap(i, j int) {
	s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
	s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}

func (s listRSlices) Less(i, j int) bool {
	return s.dirs[i] < s.dirs[j]
}
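
// For example, with hypothetical values, sorting keeps each directory
// ID aligned with its path so that sort.SearchStrings on dirs can be
// used to index into paths:
//
//	s := listRSlices{
//		dirs:  []string{"idB", "idA"},
//		paths: []string{"b", "a"},
//	}
//	s.Sort() // dirs is now ["idA", "idB"], paths is ["a", "b"]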

// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and the function returns. Once the
// in channel is closed, nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
	var dirs []string
	var paths []string
	var grouping int32

	for dir := range in {
		dirs = append(dirs[:0], dir.id)
		paths = append(paths[:0], dir.path)
		grouping = atomic.LoadInt32(&f.grouping)
	waitloop:
		for i := int32(1); i < grouping; i++ {
			select {
			case d, ok := <-in:
				if !ok {
					break waitloop
				}
				dirs = append(dirs, d.id)
				paths = append(paths, d.path)
			default:
			}
		}
		listRSlices{dirs, paths}.Sort()
		var iErr error
		foundItems := false
		_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
			// shared with me items have no parents when at the root
			if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
				item.Parents = dirs
			}
			for _, parent := range item.Parents {
				var i int
				foundItems = true
				earlyExit := false
				// If only one item in paths then no need to search for the ID
				// assuming google drive is doing its job properly.
				//
				// Note that we are at the root when len(paths) == 1 && paths[0] == ""
				if len(paths) == 1 {
					// don't check parents at root because
					// - shared with me items have no parents at the root
					// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
					i = 0
					// items at root can have more than one parent so we need to put
					// the item in just once.
					earlyExit = true
				} else {
					// only handle parents that are in the requested dirs list if not at root
					i = sort.SearchStrings(dirs, parent)
					if i == len(dirs) || dirs[i] != parent {
						continue
					}
				}
				remote := path.Join(paths[i], item.Name)
				entry, err := f.itemToDirEntry(remote, item)
				if err != nil {
					iErr = err
					return true
				}
				err = cb(entry)
				if err != nil {
					iErr = err
					return true
				}
				// If didn't check parents then insert only once
				if earlyExit {
					break
				}
			}
			return false
		})
		// Found no items in more than one directory. Retry these as
		// individual directories. This is to work around a bug in google
		// drive where (A in parents) or (B in parents) returns nothing
		// sometimes. See #3114, #4289 and
		// https://issuetracker.google.com/issues/149522397
		if len(dirs) > 1 && !foundItems {
			if atomic.SwapInt32(&f.grouping, 1) != 1 {
				fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
			}
			var recycled = make([]listREntry, len(dirs))
			f.listRmu.Lock()
			for i := range dirs {
				recycled[i] = listREntry{id: dirs[i], path: paths[i]}
				// Make a note of these dirs - if they all turn
				// out to be empty then we can re-enable grouping
				f.listRempties[dirs[i]] = struct{}{}
			}
			f.listRmu.Unlock()
			// recycle these in the background so we don't deadlock
			// the listR runners if they all get here
			wg.Add(len(recycled))
			go func() {
				for _, entry := range recycled {
					in <- entry
				}
				fs.Debugf(f, "Recycled %d entries", len(recycled))
			}()
		}
		// If using a grouping of 1 and dir was empty then check to see if it
		// is part of the group that caused grouping to be disabled.
		if grouping == 1 && len(dirs) == 1 && !foundItems {
			f.listRmu.Lock()
			if _, found := f.listRempties[dirs[0]]; found {
				// Remove the ID
				delete(f.listRempties, dirs[0])
				// If no empties left => all the directories that
				// triggered the grouping being set to 1 were actually
				// empty so must have made a mistake
				if len(f.listRempties) == 0 {
					if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
						fs.Logf(f, "Re-enabling ListR as previous detection was in error")
					}
				}
			}
			f.listRmu.Unlock()
		}

		for range dirs {
			wg.Done()
		}
		if iErr != nil {
			out <- iErr
			return
		}
		if err != nil {
			out <- err
			return
		}
	}
	out <- nil
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	directoryID = actualID(directoryID)

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
	in := make(chan listREntry, listRInputBuffer)
	out := make(chan error, fs.Config.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{}
	listed := 0

	cb := func(entry fs.DirEntry) error {
		mu.Lock()
		defer mu.Unlock()
		if d, isDir := entry.(*fs.Dir); isDir && in != nil {
			job := listREntry{actualID(d.ID()), d.Remote()}
			select {
			case in <- job:
				// Adding the wg after we've entered the item is
				// safe here because we know when the callback
				// is called we are holding a waitgroup.
				wg.Add(1)
			default:
				overflow = append(overflow, job)
			}
		}
		listed++
		return list.Add(entry)
	}

	wg.Add(1)
	in <- listREntry{directoryID, dir}
	for i := 0; i < fs.Config.Checkers; i++ {
		go f.listRRunner(ctx, &wg, in, out, cb)
	}
	go func() {
		// wait until all the directories are processed
		wg.Wait()
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > listRInputBuffer/2 {
				l = listRInputBuffer / 2
			}
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
			}
			overflow = overflow[l:]
			mu.Unlock()
			// wait again for the completion of all directories
			wg.Wait()
		}
		mu.Lock()
		if in != nil {
			// notify all workers to exit
			close(in)
			in = nil
		}
		mu.Unlock()
	}()
	// wait for all the workers to finish
	for i := 0; i < fs.Config.Checkers; i++ {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
		if e != nil && in != nil {
			err = e
			close(in)
			in = nil
		}
		mu.Unlock()
	}
	close(out)
	if err != nil {
		return err
	}
	err = list.Flush()
	if err != nil {
		return err
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

const shortcutSeparator = '\t'

// joinID adds an actual drive ID to the shortcut ID it came from
//
// directoryIDs in the dircache are these composite directory IDs so
// we must always unpack them before use.
func joinID(actual, shortcut string) string {
	return actual + string(shortcutSeparator) + shortcut
}

// splitID separates an actual ID and a shortcut ID from a composite
// ID. If there was no shortcut ID then it will return "" for it.
func splitID(compositeID string) (actualID, shortcutID string) {
	i := strings.IndexRune(compositeID, shortcutSeparator)
	if i < 0 {
		return compositeID, ""
	}
	return compositeID[:i], compositeID[i+1:]
}

// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
	return strings.IndexRune(compositeID, shortcutSeparator) >= 0
}

// actualID returns an actual ID from a composite ID
func actualID(compositeID string) (actualID string) {
	actualID, _ = splitID(compositeID)
	return actualID
}

// shortcutID returns a shortcut ID from a composite ID if available,
// or the actual ID if not.
func shortcutID(compositeID string) (shortcutID string) {
	actualID, shortcutID := splitID(compositeID)
	if shortcutID != "" {
		return shortcutID
	}
	return actualID
}
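
// To illustrate with made-up IDs: a shortcut "short1" pointing at a
// target "real1" is stored as the composite ID
//
//	joinID("real1", "short1") // "real1\tshort1"
//
// from which splitID recovers ("real1", "short1"), actualID returns
// "real1" and shortcutID returns "short1". The tab separator relies
// on the assumption that drive IDs never contain a tab character.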

// isShortcut returns true if the item is a shortcut
func isShortcut(item *drive.File) bool {
	return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
}

// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This makes sure
// that every place the ID is used has to decide explicitly which of
// the two IDs it wants.
//
// Note that we assume shortcuts can't point to shortcuts. The Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
	if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
		return item, nil
	}
	if item.ShortcutDetails == nil {
		fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
		return item, nil
	}
	newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve shortcut")
	}
	// make sure we use the Name, Parents and Trashed from the original item
	newItem.Name = item.Name
	newItem.Parents = item.Parents
	newItem.Trashed = item.Trashed
	// the new ID is a composite ID
	newItem.Id = joinID(newItem.Id, item.Id)
	return newItem, nil
}

// itemToDirEntry converts a drive.File to an fs.DirEntry.
// When the drive.File cannot be represented as an fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
	switch {
	case item.MimeType == driveFolderType:
		// cache the directory ID for later lookups
		f.dirCache.Put(remote, item.Id)
		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
		d := fs.NewDir(remote, when).SetID(item.Id)
		return d, nil
	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
		// ignore object
	default:
		entry, err = f.newObjectWithInfo(remote, item)
		if err == fs.ErrorObjectNotFound {
			return nil, nil
		}
		return entry, err
	}
	return nil, nil
}

// Creates a drive.File info from the parameters passed in.
//
// Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	directoryID = actualID(directoryID)

	leaf = f.opt.Enc.FromStandardName(leaf)
	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Name:         leaf,
		Description:  leaf,
		Parents:      []string{directoryID},
		ModifiedTime: modTime.Format(timeFormatOut),
	}
	return createInfo, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	srcMimeType := fs.MimeTypeFromName(remote)
	srcExt := path.Ext(remote)
	exportExt := ""
	importMimeType := ""

	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
		importMimeType = f.findImportFormat(srcMimeType)
		if isInternalMimeType(importMimeType) {
			remote = remote[:len(remote)-len(srcExt)]
			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
			if exportExt == "" {
				return nil, errors.Errorf("No export format found for %q", importMimeType)
			}
			if exportExt != srcExt && !f.opt.AllowImportNameChange {
				return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
			}
		}
	}

	createInfo, err := f.createFileInfo(ctx, remote, modTime)
	if err != nil {
		return nil, err
	}
	if importMimeType != "" {
		createInfo.MimeType = importMimeType
	} else {
		createInfo.MimeType = fs.MimeTypeFromName(remote)
	}

	var info *drive.File
	if size >= 0 && size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).
				Media(in, googleapi.ContentType(srcMimeType)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(f.opt.KeepRevisionForever).
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
	}
	return f.newObjectWithInfo(remote, info)
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	newDirs := dirs[:0]
	for _, dir := range dirs {
		if isShortcutID(dir.ID()) {
			fs.Infof(dir, "skipping shortcut directory")
			continue
		}
		newDirs = append(newDirs, dir)
	}
	dirs = newDirs
	if len(dirs) < 2 {
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects
		infos := []*drive.File{}
		_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Name)
			// Move the file into the destination
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(info.Id, nil).
					RemoveParents(srcDir.ID()).
					AddParents(dstDir.ID()).
					Fields("").
					SupportsAllDrives(true).
					Do()
				return f.shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.delete(ctx, srcDir.ID(), true)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}

// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
	return f.pacer.Call(func() (bool, error) {
		var err error
		if useTrash {
			info := drive.File{
				Trashed: true,
			}
			_, err = f.svc.Files.Update(id, &info).
				Fields("").
				SupportsAllDrives(true).
				Do()
		} else {
			err = f.svc.Files.Delete(id).
				Fields("").
				SupportsAllDrives(true).
				Do()
		}
		return f.shouldRetry(err)
	})
}

// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	root := path.Join(f.root, dir)
	dc := f.dirCache
	directoryID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	directoryID, shortcutID := splitID(directoryID)
	// if directory is a shortcut remove it regardless
	if shortcutID != "" {
		return f.delete(ctx, shortcutID, f.opt.UseTrash)
	}
	var trashedFiles = false
	found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
		if !item.Trashed {
			fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
			return true
		}
		fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
		trashedFiles = true
		return false
	})
	if err != nil {
		return err
	}
	if found {
		return errors.New("directory not empty")
	}
	if root != "" {
		// trash the directory if it contained trashed files or if
		// the user wants to trash, otherwise delete it.
		err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
		if err != nil {
			return err
		}
	}
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	readDescription := false
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
		readDescription = true
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't copy - not same document type")
			return nil, fs.ErrorCantCopy
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Look to see if there is an existing object
	existingObject, _ := f.NewObject(ctx, remote)

	createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	if readDescription {
		// preserve the description on copy for docs
		info, err := f.getFile(actualID(srcObj.id), "description")
		if err != nil {
			return nil, errors.Wrap(err, "failed to read description for Google Doc")
		}
		createInfo.Description = info.Description
	} else {
		// don't overwrite the description on copy for files
		// this should work for docs but it doesn't - it is probably a bug in Google Drive
		createInfo.Description = ""
	}

	// get the ID of the thing to copy - this is the shortcut if available
	id := shortcutID(srcObj.id)
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Copy(id, createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(f.opt.KeepRevisionForever).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	newObject, err := f.newObjectWithInfo(remote, info)
	if err != nil {
		return nil, err
	}
	if existingObject != nil {
		err = existingObject.Remove(ctx)
		if err != nil {
			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
		}
	}
	return newObject, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	if f.root == "" {
		return errors.New("can't purge root directory")
	}
	if f.opt.TrashedOnly {
		return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
	}
	rootID, err := f.dirCache.RootID(ctx, false)
	if err != nil {
		return err
	}
	err = f.delete(ctx, shortcutID(rootID), f.opt.UseTrash)
	f.dirCache.ResetRoot()
	if err != nil {
		return err
	}
	return nil
}

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	err := f.pacer.Call(func() (bool, error) {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	return nil
}

// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
	if !f.isTeamDrive {
		return nil
	}
	var td *drive.Drive
	err = f.pacer.Call(func() (bool, error) {
		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get Team/Shared Drive info")
	}
	fs.Debugf(f, "read info from team drive %q", td.Name)
	return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if f.isTeamDrive {
		err := f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
		// Teamdrives don't appear to have a usage API so just return empty
		return &fs.Usage{}, nil
	}
	var about *drive.About
	var err error
	err = f.pacer.Call(func() (bool, error) {
		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
	}
	q := about.StorageQuota
	usage := &fs.Usage{
		Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
		Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
		Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
	}
	if q.Limit > 0 {
		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't move - not same document type")
			return nil, fs.ErrorCantMove
		}
		remote = remote[:len(remote)-len(ext)]
	}

	_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
	if err != nil {
		return nil, err
	}
	srcParentID = actualID(srcParentID)

	// Temporary Object under construction
	dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}
	dstParents := strings.Join(dstInfo.Parents, ",")
	dstInfo.Parents = nil

	// Do the move
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
			RemoveParents(srcParentID).
			AddParents(dstParents).
			Fields(partialFields).
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	return f.newObjectWithInfo(remote, info)
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	id, err := f.dirCache.FindDir(ctx, remote, false)
	if err == nil {
		fs.Debugf(f, "attempting to share directory '%s'", remote)
		id = shortcutID(id)
	} else {
		fs.Debugf(f, "attempting to share single file '%s'", remote)
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			return "", err
		}
		id = shortcutID(o.(fs.IDer).ID())
	}
	permission := &drive.Permission{
		AllowFileDiscovery: false,
		Role:               "reader",
		Type:               "anyone",
	}
	err = f.pacer.Call(func() (bool, error) {
		// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
		// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
		_, err = f.svc.Permissions.Create(id, permission).
			Fields("").
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// Find ID of dst parent, creating subdirs if necessary
	leaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
	if err != nil {
		return err
	}
	dstDirectoryID = actualID(dstDirectoryID)

	// Check destination does not exist
	_, err = f.dirCache.FindDir(ctx, dstRemote, false)
	if err == fs.ErrorDirNotFound {
		// OK
	} else if err != nil {
		return err
	} else {
		return fs.ErrorDirExists
	}

	// Find ID of src parent
	_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
	if err != nil {
		return err
	}
	srcDirectoryID = actualID(srcDirectoryID)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}
	// Do the move
	patch := drive.File{
		Name: leaf,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
			RemoveParents(srcDirectoryID).
			AddParents(dstDirectoryID).
			Fields("").
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		startPageToken, err := f.changeNotifyStartPageToken()
		if err != nil {
			fs.Infof(f, "Failed to get StartPageToken: %s", err)
		}
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				if startPageToken == "" {
					startPageToken, err = f.changeNotifyStartPageToken()
					if err != nil {
						fs.Infof(f, "Failed to get StartPageToken: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}

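// changeNotifyStartPageToken fetches the current start page token
// from the changes API. This marks the point in the change log from
// which the next poll will read.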
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
		if f.isTeamDrive {
			changes.DriveId(f.opt.TeamDriveID)
		}
		startPageToken, err = changes.Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return
	}
	return startPageToken.StartPageToken, nil
}

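// changeNotifyRunner polls the changes API once starting from
// startPageToken, translates each change into path notifications via
// notifyFunc and returns the token to resume from on the next poll.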
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList

		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).
				Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(f.opt.ListChunk)
			}
			changesCall.SupportsAllDrives(true)
			changesCall.IncludeItemsFromAllDrives(true)
			if f.isTeamDrive {
				changesCall.DriveId(f.opt.TeamDriveID)
			}
			// If using appDataFolder then need to add Spaces
			if f.rootFolderID == "appDataFolder" {
				changesCall.Spaces("appDataFolder")
			}
			changeList, err = changesCall.Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return
		}

		type entryType struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []entryType
		for _, change := range changeList.Changes {
			// find the previous path
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				if change.File != nil && change.File.MimeType != driveFolderType {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
				} else {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
				}
			}

			// find the new path
			if change.File != nil {
				change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
				changeType := fs.EntryDirectory
				if change.File.MimeType != driveFolderType {
					changeType = fs.EntryObject
				}
				// translate the parent dir of this object
				if len(change.File.Parents) > 0 {
					for _, parent := range change.File.Parents {
						if parentPath, ok := f.dirCache.GetInv(parent); ok {
							// and append the drive file name to compute the full file name
							newPath := path.Join(parentPath, change.File.Name)
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
						}
					}
				} else { // a true root object that is changed
					pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
				}
			}
		}

		visitedPaths := make(map[string]struct{})
		for _, entry := range pathsToClear {
			if _, ok := visitedPaths[entry.path]; ok {
				continue
			}
			visitedPaths[entry.path] = struct{}{}
			notifyFunc(entry.path, entry.entryType)
		}

		switch {
		case changeList.NewStartPageToken != "":
			return changeList.NewStartPageToken, nil
		case changeList.NextPageToken != "":
			pageToken = changeList.NextPageToken
		default:
			return
		}
	}
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// changeChunkSize parses a new chunk size (in bytes) and, if it is
// valid, updates f.opt.ChunkSize to it.
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
	chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
	if err != nil {
		return errors.Wrap(err, "couldn't convert chunk size to int")
	}
	chunkSize := fs.SizeSuffix(chunkSizeInt)
	if chunkSize == f.opt.ChunkSize {
		return nil
	}
	err = checkUploadChunkSize(chunkSize)
	if err == nil {
		f.opt.ChunkSize = chunkSize
	}
	return err
}
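
// A usage sketch: changeChunkSize is driven by the "set" backend
// command below, which passes the option value through verbatim, e.g.
//
//	err := f.changeChunkSize("67108864") // hypothetical 64 MiB value
//
// The string is a plain decimal byte count; checkUploadChunkSize
// rejects values the resumable uploader cannot use.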

// changeServiceAccountFile swaps the remote over to a new service
// account file, rebuilding the oauth client and the Drive services.
// On any failure all the changes are rolled back.
func (f *Fs) changeServiceAccountFile(file string) (err error) {
	fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
	if file == f.opt.ServiceAccountFile {
		return nil
	}
	oldSvc := f.svc
	oldv2Svc := f.v2Svc
	oldOAuthClient := f.client
	oldFile := f.opt.ServiceAccountFile
	oldCredentials := f.opt.ServiceAccountCredentials
	defer func() {
		// Undo all the changes instead of doing selective undos
		if err != nil {
			f.svc = oldSvc
			f.v2Svc = oldv2Svc
			f.client = oldOAuthClient
			f.opt.ServiceAccountFile = oldFile
			f.opt.ServiceAccountCredentials = oldCredentials
		}
	}()
	f.opt.ServiceAccountFile = file
	f.opt.ServiceAccountCredentials = ""
	oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
	if err != nil {
		return errors.Wrap(err, "drive: failed when making oauth client")
	}
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return errors.Wrap(err, "couldn't create Drive client")
	}
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}
	return nil
}
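
// A usage sketch: changeServiceAccountFile is likewise driven by the
// "set" backend command, e.g. (the path is hypothetical)
//
//	err := f.changeServiceAccountFile("sa.json")
//
// On success the new credentials take effect immediately without
// recreating the Fs; on failure the deferred rollback restores the
// old client and services.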

// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
	srcFs := f
	srcPath = strings.Trim(srcPath, "/")
	dstPath = strings.Trim(dstPath, "/")
	if dstPath == "" {
		return nil, errors.New("shortcut destination can't be root directory")
	}
	// Find source
	var srcID string
	isDir := false
	if srcPath == "" {
		// source is root directory
		srcID, err = f.dirCache.RootID(ctx, false)
		if err != nil {
			return nil, err
		}
		isDir = true
	} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
		if err != fs.ErrorNotAFile {
			return nil, errors.Wrap(err, "can't find source")
		}
		// source was a directory
		srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
		if err != nil {
			return nil, errors.Wrap(err, "failed to find source dir")
		}
		isDir = true
	} else {
		// source was a file
		srcID = srcObj.(*Object).id
	}
	srcID = actualID(srcID) // link to underlying object not to shortcut

	// Find destination
	_, err = dstFs.NewObject(ctx, dstPath)
	if err != fs.ErrorObjectNotFound {
		if err == nil {
			err = errors.New("existing file")
		} else if err == fs.ErrorNotAFile {
			err = errors.New("existing directory")
		}
		return nil, errors.Wrap(err, "not overwriting shortcut target")
	}

	// Create destination shortcut
	createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
	if err != nil {
		return nil, errors.Wrap(err, "shortcut destination failed")
	}
	createInfo.MimeType = shortcutMimeType
	createInfo.ShortcutDetails = &drive.FileShortcutDetails{
		TargetId: srcID,
	}
	var info *drive.File
	err = dstFs.pacer.CallNoRetry(func() (bool, error) {
		info, err = dstFs.svc.Files.Create(createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(dstFs.opt.KeepRevisionForever).
			Do()
		return dstFs.shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "shortcut creation failed")
	}
	if isDir {
		return nil, nil
	}
	return dstFs.newObjectWithInfo(dstPath, info)
}
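
// A usage sketch: the "shortcut" backend command below resolves its
// arguments and calls this method, e.g. (paths are hypothetical)
//
//	obj, err := f.makeShortcut(ctx, "dir/file.txt", f, "dir/link.txt")
//
// Note that obj is nil when the source is a directory, as directory
// shortcuts have no fs.Object representation here.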

var commandHelp = []fs.CommandHelp{{
	Name:  "get",
	Short: "Get command for fetching the drive config parameters",
	Long: `This is a get command which will be used to fetch the various drive config parameters

Usage Examples:

    rclone backend get drive: [-o service_account_file] [-o chunk_size]
    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
	Opts: map[string]string{
		"chunk_size":           "show the current upload chunk size",
		"service_account_file": "show the current service account file",
	},
}, {
	Name:  "set",
	Short: "Set command for updating the drive config parameters",
	Long: `This is a set command which will be used to update the various drive config parameters

Usage Examples:

    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
	Opts: map[string]string{
		"chunk_size":           "update the current upload chunk size",
		"service_account_file": "update the current service account file",
	},
}, {
	Name:  "shortcut",
	Short: "Create shortcuts from files or directories",
	Long: `This command creates shortcuts from files or directories.

Usage:

    rclone backend shortcut drive: source_item destination_shortcut
    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"

In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
	Opts: map[string]string{
		"target": "optional target remote for the shortcut destination",
	},
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "get":
		out := make(map[string]string)
		if _, ok := opt["service_account_file"]; ok {
			out["service_account_file"] = f.opt.ServiceAccountFile
		}
		if _, ok := opt["chunk_size"]; ok {
			out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
		}
		return out, nil
	case "set":
		out := make(map[string]map[string]string)
		if serviceAccountFile, ok := opt["service_account_file"]; ok {
			serviceAccountMap := make(map[string]string)
			serviceAccountMap["previous"] = f.opt.ServiceAccountFile
			if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
				return out, err
			}
			f.m.Set("service_account_file", serviceAccountFile)
			serviceAccountMap["current"] = f.opt.ServiceAccountFile
			out["service_account_file"] = serviceAccountMap
		}
		if chunkSize, ok := opt["chunk_size"]; ok {
			chunkSizeMap := make(map[string]string)
			chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
			if err = f.changeChunkSize(chunkSize); err != nil {
				return out, err
			}
			chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
			f.m.Set("chunk_size", chunkSizeString)
			chunkSizeMap["current"] = chunkSizeString
			out["chunk_size"] = chunkSizeMap
		}
		return out, nil
	case "shortcut":
		if len(arg) != 2 {
			return nil, errors.New("need exactly 2 arguments")
		}
		dstFs := f
		target, ok := opt["target"]
		if ok {
			targetFs, err := cache.Get(target)
			if err != nil {
				return nil, errors.Wrap(err, "couldn't find target")
			}
			dstFs, ok = targetFs.(*Fs)
			if !ok {
				return nil, errors.New("target is not a drive backend")
			}
		}
		return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
	default:
		return nil, fs.ErrorCommandNotFound
	}
}
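
// A sketch of the JSON-encoded result of the "set" command, assuming
// both options were supplied (all values hypothetical):
//
//	{
//	    "chunk_size": {"previous": "8M", "current": "64M"},
//	    "service_account_file": {"previous": "old.json", "current": "sa.json"}
//	}
//
// "get" returns a flat map instead, keyed by the requested options.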

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *baseObject) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *baseObject) String() string {
	return o.remote
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *baseObject) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return "", nil
}

// Size returns the size of an object in bytes
func (o *baseObject) Size() int64 {
	return o.bytes
}

// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
	info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
	return
}

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, "", "", "", false, fs.ErrorObjectNotFound
		}
		return nil, "", "", "", false, err
	}
	directoryID = actualID(directoryID)

	found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
			if exportName == leaf {
				info = item
				return true
			}
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, "", "", "", false, err
	}
	if !found {
		return nil, "", "", "", false, fs.ErrorObjectNotFound
	}
	return
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
	if err != nil {
		fs.Debugf(o, "Failed to read mtime from object: %v", err)
		return time.Now()
	}
	return modTime
}

// SetModTime sets the modification time of the drive fs object
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
	// New metadata
	updateInfo := &drive.File{
		ModifiedTime: modTime.Format(timeFormatOut),
	}
	// Set modified date
	var info *drive.File
	err := o.fs.pacer.Call(func() (bool, error) {
		var err error
		info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			Do()
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Update info from read data
	o.modifiedDate = info.ModifiedTime
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *baseObject) Storable() bool {
	return true
}

// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequest(method, url, nil)
	if err != nil {
		return req, nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
		delete(req.Header, "Range")
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}

// openDocumentFile represents a documentObject open for reading.
// It updates the object size after a successful read.
type openDocumentFile struct {
	o       *documentObject // Object we are reading for
	in      io.ReadCloser   // reading from here
	bytes   int64           // number of bytes read on this connection
	eof     bool            // whether we have read end of file
	errored bool            // whether we have encountered an error during reading
}

// Read bytes from the object - see io.Reader
func (file *openDocumentFile) Read(p []byte) (n int, err error) {
	n, err = file.in.Read(p)
	file.bytes += int64(n)
	if err != nil && err != io.EOF {
		file.errored = true
	}
	if err == io.EOF {
		file.eof = true
	}
	return
}

// Close the object and update bytes read
func (file *openDocumentFile) Close() (err error) {
	// If end of file, update bytes read
	if file.eof && !file.errored {
		fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
		file.o.bytes = file.bytes
	}
	return file.in.Close()
}

// Check it satisfies the interfaces
var _ io.ReadCloser = (*openDocumentFile)(nil)

// isGoogleError checks to see if err is a googleapi.Error with a
// reason of type what
func isGoogleError(err error, what string) bool {
	if gerr, ok := err.(*googleapi.Error); ok {
		for _, error := range gerr.Errors {
			if error.Reason == what {
				return true
			}
		}
	}
	return false
}
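
// A usage sketch: the only reason string this backend currently tests
// for is the abusive-file marker, as in
//
//	if isGoogleError(err, "cannotDownloadAbusiveFile") { ... }
//
// googleapi.Error carries the list of reason/message items decoded
// from the JSON error body, and this helper scans their Reason fields.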

// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(ctx, url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(url, '?') {
					url += "&"
				} else {
					url += "?"
				}
				url += "acknowledgeAbuse=true"
				_, res, err = o.httpResponse(ctx, url, "GET", options)
			} else {
				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
			}
		}
		if err != nil {
			return nil, errors.Wrap(err, "open file failed")
		}
	}
	return res.Body, nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.v2Download {
		var v2File *drive_v2.File
		err = o.fs.pacer.Call(func() (bool, error) {
			v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
				Fields("downloadUrl").
				SupportsAllDrives(true).
				Do()
			return o.fs.shouldRetry(err)
		})
		if err == nil {
			fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
			o.url = v2File.DownloadUrl
			o.v2Download = false
		}
	}
	return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	var newOptions = options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
		// So do a subset of them manually
		switch x := o.(type) {
		case *fs.RangeOption:
			offset, end = x.Start, x.End
		case *fs.SeekOption:
			offset, end = x.Offset, -1
		default:
			newOptions = append(newOptions, o)
		}
	}
	options = newOptions
	if offset != 0 {
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
	in, err = o.baseObject.open(ctx, o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
	if end >= 0 {
		in = readers.NewLimitedReadCloser(in, end-offset+1)
	}
	return
}

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	var data = o.content
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(int64(len(data)))
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if l := int64(len(data)); offset > l {
		offset = l
	}
	data = data[offset:]
	if limit != -1 && limit < int64(len(data)) {
		data = data[:limit]
	}
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
				Media(in, googleapi.ContentType(uploadMimeType)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(o.fs.opt.KeepRevisionForever).
				Do()
			return o.fs.shouldRetry(err)
		})
		return
	}
	// Upload the file in chunks
	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// If o is a shortcut
	if isShortcutID(o.id) {
		// Delete it first
		err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
		if err != nil {
			return err
		}
		// Then put the file as a new file
		newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
		if err != nil {
			return err
		}
		// Update the object
		if newO, ok := newObj.(*Object); ok {
			*o = *newO
		} else {
			fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
		}
		return nil
	}
	srcMimeType := fs.MimeType(ctx, src)
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *Object:
		*o = *newO
	default:
		return errors.New("object type changed by update")
	}
	return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	importMimeType := ""
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
		return errors.Errorf("can't update google document type without --drive-import-formats")
	}
	importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
	if importMimeType == "" {
		return errors.Errorf("no import format found for %q", srcMimeType)
	}
	if importMimeType != o.documentMimeType {
		return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
	}
	updateInfo.MimeType = importMimeType
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	remote := src.Remote()
	remote = remote[:len(remote)-o.extLen]
	newO, err := o.fs.newObjectWithInfo(remote, info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *documentObject:
		*o = *newO
	default:
		return errors.New("object type changed by update")
	}
	return nil
}

func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errors.New("cannot update link files")
}

// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
	if o.parents > 1 {
		return errors.New("can't delete safely - has multiple parents")
	}
	return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
}

// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
	return o.id
}

func (o *documentObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

// templates for document link files
const (
	urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
	weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>URL</key>
    <string>{{ .URL }}</string>
  </dict>
</plist>
`
	desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
	htmlTemplate = `<html>
<head>
  <meta http-equiv="refresh" content="0; url={{ .URL }}" />
  <title>{{ .Title }}</title>
</head>
<body>
  Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
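
// A rendering sketch (the inline struct is a stand-in for whatever
// data the caller supplies): the templates above are executed with
// text/template, e.g.
//
//	tmpl := template.Must(template.New("webloc").Parse(weblocTemplate))
//	err := tmpl.Execute(&buf, struct{ URL, Title string }{url, title})
//
// urlTemplate emits {{"\r"}} before each newline so .url files get the
// CRLF line endings Windows expects.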

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.Commander       = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.Object          = (*documentObject)(nil)
	_ fs.MimeTyper       = (*documentObject)(nil)
	_ fs.IDer            = (*documentObject)(nil)
	_ fs.Object          = (*linkObject)(nil)
	_ fs.MimeTyper       = (*linkObject)(nil)
	_ fs.IDer            = (*linkObject)(nil)
)