2017-03-08 12:21:57 +01:00
// Package onedrive provides an interface to the Microsoft OneDrive
2015-10-04 23:08:31 +02:00
// object storage system.
package onedrive
import (
2019-06-17 10:34:30 +02:00
"context"
2018-04-20 13:55:49 +02:00
"encoding/base64"
"encoding/hex"
2017-03-12 13:00:10 +01:00
"encoding/json"
2015-10-04 23:08:31 +02:00
"fmt"
"io"
"log"
"net/http"
2016-11-25 22:52:43 +01:00
"path"
2020-01-29 13:16:18 +01:00
"strconv"
2015-10-04 23:08:31 +02:00
"strings"
"time"
2016-05-30 20:49:21 +02:00
"github.com/pkg/errors"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/backend/onedrive/api"
2019-09-23 15:32:36 +02:00
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
2018-11-02 13:14:19 +01:00
"github.com/rclone/rclone/lib/atexit"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/lib/dircache"
2020-01-14 18:33:35 +01:00
"github.com/rclone/rclone/lib/encoder"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
2015-10-04 23:08:31 +02:00
"golang.org/x/oauth2"
)
const (
	rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
	minSleep                    = 10 * time.Millisecond // minimum pacer sleep between API calls
	maxSleep                    = 2 * time.Second       // maximum pacer sleep when backing off
	decayConstant               = 2                     // bigger for slower decay, exponential
	graphURL                    = "https://graph.microsoft.com/v1.0"

	// Keys used to store the drive information in the config file
	configDriveID   = "drive_id"
	configDriveType = "drive_type"

	// Known values of the driveType property of a drive resource
	driveTypePersonal   = "personal"
	driveTypeBusiness   = "business"
	driveTypeSharepoint = "documentLibrary"

	// Upload chunking - chunks must be a multiple of 320 KiB
	defaultChunkSize  = 10 * fs.MebiByte
	chunkSizeMultiple = 320 * fs.KibiByte
)
// Globals
var (
	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}

	// QuickXorHashType is the hash.Type for OneDrive
	QuickXorHashType hash.Type
)
// Register with Fs
func init ( ) {
2019-09-23 15:32:36 +02:00
QuickXorHashType = hash . RegisterHash ( "QuickXorHash" , 40 , quickxorhash . New )
2016-02-18 12:35:25 +01:00
fs . Register ( & fs . RegInfo {
2016-02-15 19:11:53 +01:00
Name : "onedrive" ,
Description : "Microsoft OneDrive" ,
NewFs : NewFs ,
2018-05-14 19:06:57 +02:00
Config : func ( name string , m configmap . Mapper ) {
2019-09-04 21:00:37 +02:00
ctx := context . TODO ( )
2018-08-18 12:06:22 +02:00
err := oauthutil . Config ( "onedrive" , name , m , oauthConfig )
if err != nil {
log . Fatalf ( "Failed to configure token: %v" , err )
return
}
2017-09-06 17:19:52 +02:00
2019-01-17 16:01:13 +01:00
// Stop if we are running non-interactive config
if fs . Config . AutoConfirm {
2018-08-18 12:06:22 +02:00
return
}
2017-08-03 21:57:42 +02:00
2018-08-18 12:06:22 +02:00
type driveResource struct {
DriveID string ` json:"id" `
DriveName string ` json:"name" `
DriveType string ` json:"driveType" `
}
type drivesResponse struct {
Drives [ ] driveResource ` json:"value" `
}
2017-08-03 21:57:42 +02:00
2018-08-18 12:06:22 +02:00
type siteResource struct {
SiteID string ` json:"id" `
SiteName string ` json:"displayName" `
SiteURL string ` json:"webUrl" `
}
type siteResponse struct {
Sites [ ] siteResource ` json:"value" `
}
oAuthClient , _ , err := oauthutil . NewClient ( name , m , oauthConfig )
if err != nil {
log . Fatalf ( "Failed to configure OneDrive: %v" , err )
}
srv := rest . NewClient ( oAuthClient )
var opts rest . Opts
var finalDriveID string
var siteID string
switch config . Choose ( "Your choice" ,
[ ] string { "onedrive" , "sharepoint" , "driveid" , "siteid" , "search" } ,
2018-08-30 05:36:50 +02:00
[ ] string { "OneDrive Personal or Business" , "Root Sharepoint site" , "Type in driveID" , "Type in SiteID" , "Search a Sharepoint site" } ,
2018-08-18 12:06:22 +02:00
false ) {
2017-08-03 21:57:42 +02:00
2018-08-18 12:06:22 +02:00
case "onedrive" :
opts = rest . Opts {
2017-09-06 17:19:52 +02:00
Method : "GET" ,
2018-08-18 12:06:22 +02:00
RootURL : graphURL ,
Path : "/me/drives" ,
2017-08-03 21:57:42 +02:00
}
2018-08-18 12:06:22 +02:00
case "sharepoint" :
opts = rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites/root/drives" ,
2017-08-03 21:57:42 +02:00
}
2018-08-18 12:06:22 +02:00
case "driveid" :
fmt . Printf ( "Paste your Drive ID here> " )
finalDriveID = config . ReadLine ( )
case "siteid" :
fmt . Printf ( "Paste your Site ID here> " )
siteID = config . ReadLine ( )
case "search" :
fmt . Printf ( "What to search for> " )
searchTerm := config . ReadLine ( )
opts = rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites?search=" + searchTerm ,
2017-08-03 21:57:42 +02:00
}
2018-08-18 12:06:22 +02:00
sites := siteResponse { }
2019-09-04 21:00:37 +02:00
_ , err := srv . CallJSON ( ctx , & opts , nil , & sites )
2018-08-18 12:06:22 +02:00
if err != nil {
log . Fatalf ( "Failed to query available sites: %v" , err )
2017-08-03 21:57:42 +02:00
}
2018-08-18 12:06:22 +02:00
if len ( sites . Sites ) == 0 {
log . Fatalf ( "Search for '%s' returned no results" , searchTerm )
} else {
fmt . Printf ( "Found %d sites, please select the one you want to use:\n" , len ( sites . Sites ) )
for index , site := range sites . Sites {
fmt . Printf ( "%d: %s (%s) id=%s\n" , index , site . SiteName , site . SiteURL , site . SiteID )
}
siteID = sites . Sites [ config . ChooseNumber ( "Chose drive to use:" , 0 , len ( sites . Sites ) - 1 ) ] . SiteID
2017-09-06 17:19:52 +02:00
}
2018-08-18 12:06:22 +02:00
}
2017-09-06 17:19:52 +02:00
2018-08-18 12:06:22 +02:00
// if we have a siteID we need to ask for the drives
if siteID != "" {
2017-09-06 17:19:52 +02:00
opts = rest . Opts {
2018-08-18 12:06:22 +02:00
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites/" + siteID + "/drives" ,
2017-09-06 17:19:52 +02:00
}
2018-08-18 12:06:22 +02:00
}
2017-09-06 17:19:52 +02:00
2018-08-18 12:06:22 +02:00
// We don't have the final ID yet?
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse { }
2019-09-04 21:00:37 +02:00
_ , err := srv . CallJSON ( ctx , & opts , nil , & drives )
2017-09-06 17:19:52 +02:00
if err != nil {
2018-08-18 12:06:22 +02:00
log . Fatalf ( "Failed to query available drives: %v" , err )
2017-09-06 17:19:52 +02:00
}
2020-03-22 13:21:37 +01:00
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts . Path == "/me/drives" {
opts . Path = "/me/drive"
meDrive := driveResource { }
_ , err := srv . CallJSON ( ctx , & opts , nil , & meDrive )
if err != nil {
log . Fatalf ( "Failed to query available drives: %v" , err )
}
found := false
for _ , drive := range drives . Drives {
if drive . DriveID == meDrive . DriveID {
found = true
break
}
}
// add the me drive if not found already
if ! found {
fs . Debugf ( nil , "Adding %v to drives list from /me/drive" , meDrive )
drives . Drives = append ( drives . Drives , meDrive )
}
}
2018-08-18 12:06:22 +02:00
if len ( drives . Drives ) == 0 {
log . Fatalf ( "No drives found" )
} else {
fmt . Printf ( "Found %d drives, please select the one you want to use:\n" , len ( drives . Drives ) )
for index , drive := range drives . Drives {
fmt . Printf ( "%d: %s (%s) id=%s\n" , index , drive . DriveName , drive . DriveType , drive . DriveID )
}
finalDriveID = drives . Drives [ config . ChooseNumber ( "Chose drive to use:" , 0 , len ( drives . Drives ) - 1 ) ] . DriveID
2017-08-03 21:57:42 +02:00
}
2015-10-04 23:08:31 +02:00
}
2018-08-18 12:06:22 +02:00
// Test the driveID and get drive type
opts = rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/drives/" + finalDriveID + "/root" }
var rootItem api . Item
2019-09-04 21:00:37 +02:00
_ , err = srv . CallJSON ( ctx , & opts , nil , & rootItem )
2018-08-18 12:06:22 +02:00
if err != nil {
log . Fatalf ( "Failed to query root for drive %s: %v" , finalDriveID , err )
}
fmt . Printf ( "Found drive '%s' of type '%s', URL: %s\nIs that okay?\n" , rootItem . Name , rootItem . ParentReference . DriveType , rootItem . WebURL )
// This does not work, YET :)
2019-01-17 16:01:13 +01:00
if ! config . ConfirmWithConfig ( m , "config_drive_ok" , true ) {
2018-08-18 12:06:22 +02:00
log . Fatalf ( "Cancelled by user" )
}
2018-09-06 18:07:16 +02:00
m . Set ( configDriveID , finalDriveID )
m . Set ( configDriveType , rootItem . ParentReference . DriveType )
config . SaveConfig ( )
2015-10-04 23:08:31 +02:00
} ,
Options : [ ] fs . Option { {
2018-01-12 17:30:54 +01:00
Name : config . ConfigClientID ,
2018-05-14 19:06:57 +02:00
Help : "Microsoft App Client Id\nLeave blank normally." ,
2015-10-04 23:08:31 +02:00
} , {
2018-01-12 17:30:54 +01:00
Name : config . ConfigClientSecret ,
2018-05-14 19:06:57 +02:00
Help : "Microsoft App Client Secret\nLeave blank normally." ,
} , {
2018-10-01 19:36:15 +02:00
Name : "chunk_size" ,
2019-10-12 13:08:22 +02:00
Help : ` Chunk size to upload files with - must be multiple of 320 k ( 327 , 680 bytes ) .
2018-10-01 19:36:15 +02:00
2020-03-10 16:14:08 +01:00
Above this size files will be chunked - must be multiple of 320 k ( 327 , 680 bytes ) and
should not exceed 250 M ( 262 , 144 , 000 bytes ) else you may encounter \ "Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory . ` ,
2018-09-07 13:02:27 +02:00
Default : defaultChunkSize ,
2018-05-14 19:06:57 +02:00
Advanced : true ,
2018-08-21 04:50:17 +02:00
} , {
Name : "drive_id" ,
Help : "The ID of the drive to use" ,
Default : "" ,
Advanced : true ,
} , {
Name : "drive_type" ,
2019-01-11 17:55:25 +01:00
Help : "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )" ,
2018-08-21 04:50:17 +02:00
Default : "" ,
Advanced : true ,
2018-10-03 06:46:25 +02:00
} , {
2018-10-01 19:36:15 +02:00
Name : "expose_onenote_files" ,
Help : ` Set to make OneNote files show up in directory listings .
By default rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won ' t work on them . But this
behaviour may also prevent you from deleting them . If you want to
delete OneNote files or otherwise want them to show up in directory
listing , set this option . ` ,
2018-10-03 06:46:25 +02:00
Default : false ,
Advanced : true ,
2020-03-15 13:07:46 +01:00
} , {
Name : "server_side_across_configs" ,
Default : false ,
Help : ` Allow server side operations ( eg copy ) to work across different onedrive configs .
This can be useful if you wish to do a server side copy between two
different Onedrives . Note that this isn ' t enabled by default
because it isn ' t easy to tell if it will work between any two
configurations . ` ,
Advanced : true ,
2020-01-14 18:33:35 +01:00
} , {
Name : config . ConfigEncoding ,
Help : config . ConfigEncodingHelp ,
Advanced : true ,
2020-01-14 22:51:49 +01:00
// List of replaced characters:
// < (less than) -> '< ' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '> ' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> ': ' // FULLWIDTH COLON
// " (double quote) -> '" ' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '\ ' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '| ' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '? ' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '* ' // FULLWIDTH ASTERISK
// # (number sign) -> '# ' // FULLWIDTH NUMBER SIGN
// % (percent sign) -> '% ' // FULLWIDTH PERCENT SIGN
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
// ~ (tilde) -> '~ ' // FULLWIDTH TILDE
//
// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '. ' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as json doesn't handle them.
//
// The OneDrive API documentation lists the set of reserved characters, but
// testing showed this list is incomplete. This are the differences:
// - " (double quote) is rejected, but missing in the documentation
// - space at the end of file and folder names is rejected, but missing in the documentation
// - period at the end of file names is rejected, but missing in the documentation
//
// Adding these restrictions to the OneDrive API documentation yields exactly
// the same rules as the Windows naming conventions.
//
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
Default : ( encoder . Display |
encoder . EncodeBackSlash |
encoder . EncodeHashPercent |
encoder . EncodeLeftSpace |
encoder . EncodeLeftTilde |
encoder . EncodeRightPeriod |
encoder . EncodeRightSpace |
encoder . EncodeWin |
encoder . EncodeInvalidUtf8 ) ,
2015-10-04 23:08:31 +02:00
} } ,
} )
2018-05-14 19:06:57 +02:00
}
2017-08-03 21:57:42 +02:00
2018-05-14 19:06:57 +02:00
// Options defines the configuration for this backend
type Options struct {
	ChunkSize               fs.SizeSuffix        `config:"chunk_size"`                 // upload chunk size - must be a multiple of 320k
	DriveID                 string               `config:"drive_id"`                   // ID of the drive to query via Microsoft Graph
	DriveType               string               `config:"drive_type"`                 // personal | business | documentLibrary
	ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`       // show OneNote files in directory listings
	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"` // allow server side copy between different configs
	Enc                     encoder.MultiEncoder `config:"encoding"`                   // how to encode/escape file names for the API
}
// Fs represents a remote one drive
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the one drive server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	driveID      string             // ID to use for querying Microsoft Graph
	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	hasMetaData   bool      // whether info below has been set
	isOneNoteFile bool      // Whether the object is a OneNote file
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	id            string    // ID of the object
	sha1          string    // SHA-1 of the object content
	quickxorhash  string    // QuickXorHash of the object content
	mimeType      string    // Content-Type of object from server (may not be as uploaded)
}
// ------------------------------------------------------------

// Name returns the configured name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root path of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string for display
func (f *Fs) String() string {
	return "One drive root '" + f.root + "'"
}
2017-01-13 18:21:47 +01:00
// Features returns the optional features of this Fs
func ( f * Fs ) Features ( ) * fs . Features {
return f . features
}
2015-10-04 23:08:31 +02:00
// parsePath parses a one drive 'url' by stripping any leading and
// trailing slashes, returning the path relative to the drive root.
//
// The parameter is named p rather than path so it does not shadow the
// imported stdlib "path" package.
func parsePath(p string) (root string) {
	return strings.Trim(p, "/")
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry ( resp * http . Response , err error ) ( bool , error ) {
2020-01-29 13:16:18 +01:00
retry := false
if resp != nil {
switch resp . StatusCode {
case 401 :
if len ( resp . Header [ "Www-Authenticate" ] ) == 1 && strings . Index ( resp . Header [ "Www-Authenticate" ] [ 0 ] , "expired_token" ) >= 0 {
retry = true
fs . Debugf ( nil , "Should retry: %v" , err )
}
case 429 : // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp . Header [ "Retry-After" ] ; len ( values ) == 1 && values [ 0 ] != "" {
retryAfter , parseErr := strconv . Atoi ( values [ 0 ] )
if parseErr != nil {
fs . Debugf ( nil , "Failed to parse Retry-After: %q: %v" , values [ 0 ] , parseErr )
} else {
duration := time . Second * time . Duration ( retryAfter )
retry = true
err = pacer . RetryAfterError ( err , duration )
fs . Debugf ( nil , "Too many requests. Trying again in %d seconds." , retryAfter )
}
}
2020-03-25 13:56:38 +01:00
case 507 : // Insufficient Storage
return false , fserrors . FatalError ( err )
2020-01-29 13:16:18 +01:00
}
2017-03-23 14:10:43 +01:00
}
2020-01-29 13:16:18 +01:00
return retry || fserrors . ShouldRetry ( err ) || fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2015-10-04 23:08:31 +02:00
}
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
	if relPath != "" {
		// Encode the path for the API and add the ":" addressing suffix
		relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
	}
	opts := newOptsCall(normalizedID, "GET", ":"+relPath)
	// Retry the call via the pacer on retryable errors
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})
	return info, resp, err
}
2019-01-09 06:11:00 +01:00
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	// Fast path: not OneDrive Personal, or the path has no folder
	// component - address the item directly relative to the drive root.
	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		var opts rest.Opts
		if len(path) == 0 {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root",
			}
		} else {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
			}
		}
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
			return shouldRetry(resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		var dirCacheRootIDExists bool
		rootNormalizedID, dirCacheRootIDExists = f.dirCache.Get("")
		if f.root == "" {
			// if f.root == "", it means f.root is the absolute root of the drive
			// and its ID should have been found in NewFs
			dirCacheFoundRoot = dirCacheRootIDExists
		} else if _, err := f.dirCache.RootParentID(); err == nil {
			// if root is in a folder, it must have a parent folder, and
			// if dirCache has found root in NewFs, the parent folder's ID
			// should be present.
			// This RootParentID() check is a fix for #3164 which describes
			// a possible case where the root is not found.
			// (note: the err here deliberately shadows the named return)
			dirCacheFoundRoot = dirCacheRootIDExists
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}
2015-11-27 13:46:13 +01:00
// errorHandler parses a non 2xx error response into an error
func errorHandler ( resp * http . Response ) error {
// Decode error response
errResponse := new ( api . Error )
err := rest . DecodeJSON ( resp , & errResponse )
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Debugf ( nil , "Couldn't decode error response: %v" , err )
2015-11-27 13:46:13 +01:00
}
if errResponse . ErrorInfo . Code == "" {
errResponse . ErrorInfo . Code = resp . Status
}
return errResponse
}
2018-09-07 13:02:27 +02:00
// checkUploadChunkSize checks that the chunk size is valid for
// OneDrive uploads: a positive multiple of chunkSizeMultiple (320 KiB).
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	switch {
	case cs%chunkSizeMultiple != 0:
		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
	case cs < minChunkSize:
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}
// setUploadChunkSize validates and sets a new upload chunk size,
// returning the previous value.  On error the chunk size is unchanged.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return old, err
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return old, nil
}
2015-10-04 23:08:31 +02:00
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "onedrive: chunk size")
	}

	// drive_id and drive_type are written by the interactive config -
	// without them we cannot address the drive at all
	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure OneDrive")
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		driveID:   opt.DriveID,
		driveType: opt.DriveType,
		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
	if err != nil || rootInfo.GetID() == "" {
		return nil, errors.Wrap(err, "failed to get root")
	}

	f.dirCache = dircache.New(root, rootInfo.GetID(), f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		// NOTE(review): this uses rootInfo.ID where the dirCache above
		// uses rootInfo.GetID() - presumably equivalent, but confirm
		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// rootSlash returns f.root with a trailing slash appended when it is
// non-empty, and the empty string otherwise.
func (f *Fs) rootSlash() string {
	if f.root != "" {
		return f.root + "/"
	}
	return f.root
}
// Return an Object from a path
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) newObjectWithInfo ( ctx context . Context , remote string , info * api . Item ) ( fs . Object , error ) {
2015-10-04 23:08:31 +02:00
o := & Object {
fs : f ,
remote : remote ,
}
2017-03-06 21:11:54 +01:00
var err error
2015-10-04 23:08:31 +02:00
if info != nil {
// Set info
2017-03-06 21:11:54 +01:00
err = o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
} else {
2019-06-17 10:34:30 +02:00
err = o . readMetaData ( ctx ) // reads info and meta, returning an error
2017-03-06 21:11:54 +01:00
}
if err != nil {
return nil , err
2015-10-04 23:08:31 +02:00
}
2016-06-25 22:23:20 +02:00
return o , nil
2015-10-04 23:08:31 +02:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) NewObject ( ctx context . Context , remote string ) ( fs . Object , error ) {
return f . newObjectWithInfo ( ctx , remote , nil )
2015-10-04 23:08:31 +02:00
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
2019-06-17 10:34:30 +02:00
func ( f * Fs ) FindLeaf ( ctx context . Context , pathID , leaf string ) ( pathIDOut string , found bool , err error ) {
2017-02-09 12:01:20 +01:00
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
2019-01-09 06:11:00 +01:00
_ , ok := f . dirCache . GetInv ( pathID )
2015-10-04 23:08:31 +02:00
if ! ok {
2016-06-12 16:06:02 +02:00
return "" , false , errors . New ( "couldn't find parent ID" )
2015-10-04 23:08:31 +02:00
}
2019-09-04 21:00:37 +02:00
info , resp , err := f . readMetaDataForPathRelativeToID ( ctx , pathID , leaf )
2015-10-04 23:08:31 +02:00
if err != nil {
if resp != nil && resp . StatusCode == http . StatusNotFound {
return "" , false , nil
}
return "" , false , err
}
2018-10-03 06:46:25 +02:00
if info . GetPackageType ( ) == api . PackageTypeOneNote {
return "" , false , errors . New ( "found OneNote file when looking for folder" )
}
2018-07-11 19:48:59 +02:00
if info . GetFolder ( ) == nil {
2016-06-12 16:06:02 +02:00
return "" , false , errors . New ( "found file when looking for folder" )
2015-10-04 23:08:31 +02:00
}
2018-07-11 19:48:59 +02:00
return info . GetID ( ) , true , nil
2015-10-04 23:08:31 +02:00
}
// CreateDir makes a directory with pathID as parent and name leaf
2019-06-17 10:34:30 +02:00
func ( f * Fs ) CreateDir ( ctx context . Context , dirID , leaf string ) ( newID string , err error ) {
2018-07-11 19:48:59 +02:00
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
2015-10-04 23:08:31 +02:00
var resp * http . Response
var info * api . Item
2018-07-11 19:48:59 +02:00
opts := newOptsCall ( dirID , "POST" , "/children" )
2015-10-04 23:08:31 +02:00
mkdir := api . CreateItemRequest {
2020-01-14 18:33:35 +01:00
Name : f . opt . Enc . FromStandardName ( leaf ) ,
2015-10-04 23:08:31 +02:00
ConflictBehavior : "fail" ,
}
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & mkdir , & info )
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "" , err
}
2018-07-11 19:48:59 +02:00
2015-10-04 23:08:31 +02:00
//fmt.Printf("...Id %q\n", *info.Id)
2018-07-11 19:48:59 +02:00
return info . GetID ( ) , nil
2015-10-04 23:08:31 +02:00
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			item := &result.Value[i]
			isFolder := item.GetFolder() != nil
			// Skip items the caller is not interested in
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			if item.Deleted != nil {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.GetName())
			if fn(item) {
				found = true
				break OUTER
			}
		}
		// Follow paging: @odata.nextLink is a complete URL, so use it
		// as the RootURL with an empty Path
		if result.NextLink == "" {
			break
		}
		opts.Path = ""
		opts.RootURL = result.NextLink
	}
	return
}
2017-06-11 23:43:31 +02:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) List ( ctx context . Context , dir string ) ( entries fs . DirEntries , err error ) {
err = f . dirCache . FindRoot ( ctx , false )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
2019-06-17 10:34:30 +02:00
directoryID , err := f . dirCache . FindDir ( ctx , dir , false )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
var iErr error
2019-09-04 21:00:37 +02:00
_ , err = f . listAll ( ctx , directoryID , false , false , func ( info * api . Item ) bool {
2018-10-03 06:46:25 +02:00
if ! f . opt . ExposeOneNoteFiles && info . GetPackageType ( ) == api . PackageTypeOneNote {
fs . Debugf ( info . Name , "OneNote file not shown in directory listing" )
return false
}
2018-07-11 19:48:59 +02:00
remote := path . Join ( dir , info . GetName ( ) )
folder := info . GetFolder ( )
if folder != nil {
2017-06-11 23:43:31 +02:00
// cache the directory ID for later lookups
2018-07-11 19:48:59 +02:00
id := info . GetID ( )
f . dirCache . Put ( remote , id )
d := fs . NewDir ( remote , time . Time ( info . GetLastModifiedDateTime ( ) ) ) . SetID ( id )
2019-03-18 12:23:00 +01:00
d . SetItems ( folder . ChildCount )
2017-06-11 23:43:31 +02:00
entries = append ( entries , d )
2015-10-04 23:08:31 +02:00
} else {
2019-06-17 10:34:30 +02:00
o , err := f . newObjectWithInfo ( ctx , remote , info )
2016-06-25 22:23:20 +02:00
if err != nil {
2017-06-11 23:43:31 +02:00
iErr = err
2016-06-25 22:23:20 +02:00
return true
2015-10-04 23:08:31 +02:00
}
2017-06-11 23:43:31 +02:00
entries = append ( entries , o )
2015-10-04 23:08:31 +02:00
}
return false
} )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
if iErr != nil {
return nil , iErr
}
return entries , nil
2015-10-04 23:08:31 +02:00
}
2015-10-30 09:40:14 +01:00
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
2015-10-04 23:08:31 +02:00
//
2015-10-30 09:40:14 +01:00
// Returns the object, leaf, directoryID and error
2015-10-04 23:08:31 +02:00
//
2015-10-30 09:40:14 +01:00
// Used to create new objects
2019-06-17 10:34:30 +02:00
func ( f * Fs ) createObject ( ctx context . Context , remote string , modTime time . Time , size int64 ) ( o * Object , leaf string , directoryID string , err error ) {
2015-10-04 23:08:31 +02:00
// Create the directory for the object if it doesn't exist
2019-06-17 10:34:30 +02:00
leaf , directoryID , err = f . dirCache . FindRootAndPath ( ctx , remote , true )
2015-10-04 23:08:31 +02:00
if err != nil {
2015-10-30 09:40:14 +01:00
return nil , leaf , directoryID , err
2015-10-04 23:08:31 +02:00
}
// Temporary Object under construction
2015-10-30 09:40:14 +01:00
o = & Object {
2015-10-04 23:08:31 +02:00
fs : f ,
remote : remote ,
}
2015-10-30 09:40:14 +01:00
return o , leaf , directoryID , nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Put ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2016-02-18 12:35:25 +01:00
remote := src . Remote ( )
size := src . Size ( )
2019-06-17 10:34:30 +02:00
modTime := src . ModTime ( ctx )
2016-02-18 12:35:25 +01:00
2019-06-17 10:34:30 +02:00
o , _ , _ , err := f . createObject ( ctx , remote , modTime , size )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2019-06-17 10:34:30 +02:00
return o , o . Update ( ctx , in , src , options ... )
2015-10-04 23:08:31 +02:00
}
// Mkdir creates the container if it doesn't exist
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Mkdir ( ctx context . Context , dir string ) error {
err := f . dirCache . FindRoot ( ctx , true )
2016-11-25 22:52:43 +01:00
if err != nil {
return err
}
if dir != "" {
2019-06-17 10:34:30 +02:00
_ , err = f . dirCache . FindDir ( ctx , dir , true )
2016-11-25 22:52:43 +01:00
}
return err
2015-10-04 23:08:31 +02:00
}
// deleteObject removes an object by ID
2019-09-04 21:00:37 +02:00
func ( f * Fs ) deleteObject ( ctx context . Context , id string ) error {
2018-07-11 19:48:59 +02:00
opts := newOptsCall ( id , "DELETE" , "" )
opts . NoResponse = true
2015-10-04 23:08:31 +02:00
return f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err := f . srv . Call ( ctx , & opts )
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
2019-06-17 10:34:30 +02:00
func ( f * Fs ) purgeCheck ( ctx context . Context , dir string , check bool ) error {
2016-11-25 22:52:43 +01:00
root := path . Join ( f . root , dir )
if root == "" {
2016-06-12 16:06:02 +02:00
return errors . New ( "can't purge root directory" )
2015-10-04 23:08:31 +02:00
}
dc := f . dirCache
2019-06-17 10:34:30 +02:00
err := dc . FindRoot ( ctx , false )
2017-01-15 13:18:07 +01:00
if err != nil {
return err
}
2019-06-17 10:34:30 +02:00
rootID , err := dc . FindDir ( ctx , dir , false )
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2018-09-01 00:07:12 +02:00
if check {
// check to see if there are any items
2019-09-04 21:00:37 +02:00
found , err := f . listAll ( ctx , rootID , false , false , func ( item * api . Item ) bool {
2018-09-01 00:07:12 +02:00
return true
} )
if err != nil {
return err
}
if found {
return fs . ErrorDirectoryNotEmpty
}
2015-10-04 23:08:31 +02:00
}
2019-09-04 21:00:37 +02:00
err = f . deleteObject ( ctx , rootID )
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2016-11-25 22:52:43 +01:00
f . dirCache . FlushDir ( dir )
2015-10-04 23:08:31 +02:00
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Rmdir ( ctx context . Context , dir string ) error {
return f . purgeCheck ( ctx , dir , true )
2015-10-04 23:08:31 +02:00
}
// Precision return the precision of this Fs
//
// OneDrive keeps modification times to one second resolution.
func (f *Fs) Precision() time.Duration {
	return time.Second
}
2015-10-30 09:40:14 +01:00
// waitForJob waits for the job with status in url to complete
2019-06-17 10:34:30 +02:00
func ( f * Fs ) waitForJob ( ctx context . Context , location string , o * Object ) error {
2015-10-30 09:40:14 +01:00
deadline := time . Now ( ) . Add ( fs . Config . Timeout )
for time . Now ( ) . Before ( deadline ) {
var resp * http . Response
var err error
2017-03-12 13:00:10 +01:00
var body [ ] byte
2015-10-30 09:40:14 +01:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
2018-08-18 12:06:22 +02:00
resp , err = http . Get ( location )
2017-03-12 13:00:10 +01:00
if err != nil {
2018-01-12 17:30:54 +01:00
return fserrors . ShouldRetry ( err ) , err
2017-03-12 13:00:10 +01:00
}
body , err = rest . ReadBody ( resp )
2018-01-12 17:30:54 +01:00
return fserrors . ShouldRetry ( err ) , err
2015-10-30 09:40:14 +01:00
} )
if err != nil {
return err
}
2017-03-12 13:00:10 +01:00
// Try to decode the body first as an api.AsyncOperationStatus
var status api . AsyncOperationStatus
err = json . Unmarshal ( body , & status )
if err != nil {
return errors . Wrapf ( err , "async status result not JSON: %q" , body )
}
2018-08-18 12:06:22 +02:00
switch status . Status {
case "failed" :
case "deleteFailed" :
{
return errors . Errorf ( "%s: async operation returned %q" , o . remote , status . Status )
2015-10-30 09:40:14 +01:00
}
2018-08-18 12:06:22 +02:00
case "completed" :
2019-06-17 10:34:30 +02:00
err = o . readMetaData ( ctx )
2018-08-18 12:06:22 +02:00
return errors . Wrapf ( err , "async operation completed but readMetaData failed" )
2015-10-30 09:40:14 +01:00
}
2018-08-18 12:06:22 +02:00
2015-10-30 09:40:14 +01:00
time . Sleep ( 1 * time . Second )
}
2016-06-12 16:06:02 +02:00
return errors . Errorf ( "async operation didn't complete after %v" , fs . Config . Timeout )
2015-10-30 09:40:14 +01:00
}
2015-10-04 23:08:31 +02:00
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Copy ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2015-10-30 09:40:14 +01:00
srcObj , ok := src . ( * Object )
if ! ok {
2017-02-09 12:01:20 +01:00
fs . Debugf ( src , "Can't copy - not same remote type" )
2015-10-30 09:40:14 +01:00
return nil , fs . ErrorCantCopy
}
2019-06-17 10:34:30 +02:00
err := srcObj . readMetaData ( ctx )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2020-03-15 13:07:46 +01:00
// Check we aren't overwriting a file on the same remote
if srcObj . fs == f {
srcPath := srcObj . rootPath ( )
dstPath := f . rootPath ( remote )
if strings . ToLower ( srcPath ) == strings . ToLower ( dstPath ) {
return nil , errors . Errorf ( "can't copy %q -> %q as are same name when lowercase" , srcPath , dstPath )
}
2017-02-22 20:28:22 +01:00
}
2015-10-30 09:40:14 +01:00
// Create temporary object
2019-06-17 10:34:30 +02:00
dstObj , leaf , directoryID , err := f . createObject ( ctx , remote , srcObj . modTime , srcObj . size )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
// Copy the object
2018-08-18 12:06:22 +02:00
opts := newOptsCall ( srcObj . id , "POST" , "/copy" )
2018-07-11 19:48:59 +02:00
opts . ExtraHeaders = map [ string ] string { "Prefer" : "respond-async" }
opts . NoResponse = true
2019-01-09 06:11:00 +01:00
id , dstDriveID , _ := parseNormalizedID ( directoryID )
2018-07-11 19:48:59 +02:00
2020-01-14 18:33:35 +01:00
replacedLeaf := f . opt . Enc . FromStandardName ( leaf )
2018-08-04 12:16:43 +02:00
copyReq := api . CopyItemRequest {
2015-10-30 09:40:14 +01:00
Name : & replacedLeaf ,
ParentReference : api . ItemReference {
2019-01-09 06:11:00 +01:00
DriveID : dstDriveID ,
2018-08-18 12:06:22 +02:00
ID : id ,
2015-10-30 09:40:14 +01:00
} ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & copyReq , nil )
2015-10-30 09:40:14 +01:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
// read location header
location := resp . Header . Get ( "Location" )
if location == "" {
2016-06-12 16:06:02 +02:00
return nil , errors . New ( "didn't receive location header in copy response" )
2015-10-30 09:40:14 +01:00
}
// Wait for job to finish
2019-06-17 10:34:30 +02:00
err = f . waitForJob ( ctx , location , dstObj )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2018-03-15 08:06:17 +01:00
2018-03-15 08:06:17 +01:00
// Copy does NOT copy the modTime from the source and there seems to
// be no way to set date before
// This will create TWO versions on OneDrive
2019-06-17 10:34:30 +02:00
err = dstObj . SetModTime ( ctx , srcObj . ModTime ( ctx ) )
2018-03-15 08:06:17 +01:00
if err != nil {
return nil , err
}
2015-10-30 09:40:14 +01:00
return dstObj , nil
}
2015-10-04 23:08:31 +02:00
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Purge ( ctx context . Context ) error {
return f . purgeCheck ( ctx , "" , false )
2015-10-04 23:08:31 +02:00
}
2017-03-14 16:35:10 +01:00
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Move ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2017-03-14 16:35:10 +01:00
srcObj , ok := src . ( * Object )
if ! ok {
fs . Debugf ( src , "Can't move - not same remote type" )
return nil , fs . ErrorCantMove
}
// Create temporary object
2019-06-17 10:34:30 +02:00
dstObj , leaf , directoryID , err := f . createObject ( ctx , remote , srcObj . modTime , srcObj . size )
2017-03-14 16:35:10 +01:00
if err != nil {
return nil , err
}
2019-01-09 06:11:00 +01:00
id , dstDriveID , _ := parseNormalizedID ( directoryID )
_ , srcObjDriveID , _ := parseNormalizedID ( srcObj . id )
if dstDriveID != srcObjDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return nil , fs . ErrorCantMove
}
2017-03-14 16:35:10 +01:00
// Move the object
2018-07-11 19:48:59 +02:00
opts := newOptsCall ( srcObj . id , "PATCH" , "" )
2017-03-14 16:35:10 +01:00
move := api . MoveItemRequest {
2020-01-14 18:33:35 +01:00
Name : f . opt . Enc . FromStandardName ( leaf ) ,
2017-03-14 16:35:10 +01:00
ParentReference : & api . ItemReference {
2019-01-09 06:11:00 +01:00
DriveID : dstDriveID ,
ID : id ,
2017-03-14 16:35:10 +01:00
} ,
// We set the mod time too as it gets reset otherwise
FileSystemInfo : & api . FileSystemInfoFacet {
CreatedDateTime : api . Timestamp ( srcObj . modTime ) ,
LastModifiedDateTime : api . Timestamp ( srcObj . modTime ) ,
} ,
}
var resp * http . Response
var info api . Item
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & move , & info )
2017-03-14 16:35:10 +01:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
err = dstObj . setMetaData ( & info )
if err != nil {
return nil , err
}
return dstObj , nil
}
2018-08-02 18:13:37 +02:00
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)
	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}
	// find the root src directory
	err := srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// Moving to the root of this remote: it must not exist yet
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}
	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}
	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}
	_, srcDriveID, _ := parseNormalizedID(srcID)
	if dstDriveID != srcDriveID {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		return fs.ErrorCantDirMove
	}
	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK — destination is free
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}
	// Get timestamps of src so they can be preserved (the move
	// resets them otherwise)
	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
	if err != nil {
		return err
	}
	// Do the move by PATCHing the source item's parent reference
	opts := newOptsCall(srcID, "PATCH", "")
	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(leaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      srcInfo.CreatedDateTime,
			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	// Invalidate the cached ID of the directory we just moved
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
2016-12-09 16:39:29 +01:00
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
2018-04-16 23:19:25 +02:00
// About gets quota information
2019-06-17 10:34:30 +02:00
func ( f * Fs ) About ( ctx context . Context ) ( usage * fs . Usage , err error ) {
2018-04-16 23:19:25 +02:00
var drive api . Drive
opts := rest . Opts {
Method : "GET" ,
Path : "" ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , nil , & drive )
2018-04-16 23:19:25 +02:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , errors . Wrap ( err , "about failed" )
}
q := drive . Quota
usage = & fs . Usage {
Total : fs . NewUsageValue ( q . Total ) , // quota of bytes that can be used
Used : fs . NewUsageValue ( q . Used ) , // bytes in use
Trashed : fs . NewUsageValue ( q . Deleted ) , // bytes in trash
Free : fs . NewUsageValue ( q . Remaining ) , // bytes which can be uploaded before reaching the quota
}
return usage , nil
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
2018-01-12 17:30:54 +01:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-08-18 12:06:22 +02:00
if f . driveType == driveTypePersonal {
return hash . Set ( hash . SHA1 )
2018-04-20 13:55:49 +02:00
}
2019-09-23 15:32:36 +02:00
return hash . Set ( QuickXorHashType )
2016-01-11 13:39:33 +01:00
}
2018-10-09 14:11:48 +02:00
// PublicLink returns a link for downloading without accout.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) PublicLink ( ctx context . Context , remote string ) ( link string , err error ) {
2018-11-02 13:14:19 +01:00
info , _ , err := f . readMetaDataForPath ( ctx , f . rootPath ( remote ) )
2018-10-09 14:11:48 +02:00
if err != nil {
return "" , err
}
2019-01-09 06:11:00 +01:00
opts := newOptsCall ( info . GetID ( ) , "POST" , "/createLink" )
2018-10-09 14:11:48 +02:00
share := api . CreateShareLinkRequest {
Type : "view" ,
Scope : "anonymous" ,
}
var resp * http . Response
var result api . CreateShareLinkResponse
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & share , & result )
2018-10-09 14:11:48 +02:00
return shouldRetry ( resp , err )
} )
if err != nil {
fmt . Println ( err )
return "" , err
}
return result . Link . WebURL , nil
}
2015-10-04 23:08:31 +02:00
// ------------------------------------------------------------
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-10-04 23:08:31 +02:00
return o . fs
}
// String returns a description of the Object for logging
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
// Remote returns the remote path of the object
func (o *Object) Remote() string {
	return o.remote
}
2018-11-02 13:14:19 +01:00
// rootPath joins remote onto the Fs root (with a trailing slash)
// for use in server requests
func (f *Fs) rootPath(remote string) string {
	return f.rootSlash() + remote
}
// rootPath returns the object's path relative to the Fs root,
// for use in local functions
func (o *Object) rootPath() string {
	return o.fs.rootPath(o.remote)
}
2018-10-14 15:17:53 +02:00
// srvPath returns a path for use in server given a remote
func ( f * Fs ) srvPath ( remote string ) string {
2020-01-14 18:33:35 +01:00
return f . opt . Enc . FromStandardPath ( f . rootSlash ( ) + remote )
2018-10-14 15:17:53 +02:00
}
2015-10-04 23:08:31 +02:00
// srvPath returns a path for use in server
func ( o * Object ) srvPath ( ) string {
2018-10-14 15:17:53 +02:00
return o . fs . srvPath ( o . remote )
2015-10-04 23:08:31 +02:00
}
2016-01-11 13:39:33 +01:00
// Hash returns the SHA-1 of an object returning a lowercase hex string
2019-06-17 10:34:30 +02:00
func ( o * Object ) Hash ( ctx context . Context , t hash . Type ) ( string , error ) {
2018-08-18 12:06:22 +02:00
if o . fs . driveType == driveTypePersonal {
if t == hash . SHA1 {
return o . sha1 , nil
}
} else {
2019-09-23 15:32:36 +02:00
if t == QuickXorHashType {
2018-08-18 12:06:22 +02:00
return o . quickxorhash , nil
2018-04-20 13:55:49 +02:00
}
2016-01-11 13:39:33 +01:00
}
2018-08-18 12:06:22 +02:00
return "" , hash . ErrUnsupported
2015-10-04 23:08:31 +02:00
}
// Size returns the size of an object in bytes
func ( o * Object ) Size ( ) int64 {
2019-06-17 10:34:30 +02:00
err := o . readMetaData ( context . TODO ( ) )
2015-10-04 23:08:31 +02:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2015-10-04 23:08:31 +02:00
return 0
}
return o . size
}
// setMetaData sets the metadata from info
2017-03-06 21:11:54 +01:00
func ( o * Object ) setMetaData ( info * api . Item ) ( err error ) {
2018-07-11 19:48:59 +02:00
if info . GetFolder ( ) != nil {
2017-03-06 21:11:54 +01:00
return errors . Wrapf ( fs . ErrorNotAFile , "%q" , o . remote )
}
2015-10-04 23:08:31 +02:00
o . hasMetaData = true
2018-07-11 19:48:59 +02:00
o . size = info . GetSize ( )
2016-01-11 13:39:33 +01:00
2018-10-03 06:46:25 +02:00
o . isOneNoteFile = info . GetPackageType ( ) == api . PackageTypeOneNote
2018-04-20 13:55:49 +02:00
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
2016-01-17 11:45:17 +01:00
//
2018-04-20 13:55:49 +02:00
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
2018-07-11 19:48:59 +02:00
file := info . GetFile ( )
if file != nil {
o . mimeType = file . MimeType
if file . Hashes . Sha1Hash != "" {
o . sha1 = strings . ToLower ( file . Hashes . Sha1Hash )
2016-09-21 23:13:24 +02:00
}
2018-07-11 19:48:59 +02:00
if file . Hashes . QuickXorHash != "" {
h , err := base64 . StdEncoding . DecodeString ( file . Hashes . QuickXorHash )
2018-04-20 13:55:49 +02:00
if err != nil {
2018-07-11 19:48:59 +02:00
fs . Errorf ( o , "Failed to decode QuickXorHash %q: %v" , file . Hashes . QuickXorHash , err )
2018-04-20 13:55:49 +02:00
} else {
o . quickxorhash = hex . EncodeToString ( h )
}
}
2016-01-11 13:39:33 +01:00
}
2018-07-11 19:48:59 +02:00
fileSystemInfo := info . GetFileSystemInfo ( )
if fileSystemInfo != nil {
o . modTime = time . Time ( fileSystemInfo . LastModifiedDateTime )
2015-10-04 23:08:31 +02:00
} else {
2018-07-11 19:48:59 +02:00
o . modTime = time . Time ( info . GetLastModifiedDateTime ( ) )
2015-10-04 23:08:31 +02:00
}
2018-07-11 19:48:59 +02:00
o . id = info . GetID ( )
2017-03-06 21:11:54 +01:00
return nil
2015-10-04 23:08:31 +02:00
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
2019-06-17 10:34:30 +02:00
func ( o * Object ) readMetaData ( ctx context . Context ) ( err error ) {
2015-10-04 23:08:31 +02:00
if o . hasMetaData {
return nil
}
2018-11-02 13:14:19 +01:00
info , _ , err := o . fs . readMetaDataForPath ( ctx , o . rootPath ( ) )
2015-10-04 23:08:31 +02:00
if err != nil {
2016-06-25 22:23:20 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "itemNotFound" {
return fs . ErrorObjectNotFound
}
}
2015-10-04 23:08:31 +02:00
return err
}
2017-03-06 21:11:54 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2019-06-17 10:34:30 +02:00
func ( o * Object ) ModTime ( ctx context . Context ) time . Time {
err := o . readMetaData ( ctx )
2015-10-04 23:08:31 +02:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2015-10-04 23:08:31 +02:00
return time . Now ( )
}
return o . modTime
}
// setModTime sets the modification time of the local fs object
2019-06-17 10:34:30 +02:00
func ( o * Object ) setModTime ( ctx context . Context , modTime time . Time ) ( * api . Item , error ) {
2018-07-11 19:48:59 +02:00
var opts rest . Opts
2019-06-17 10:34:30 +02:00
leaf , directoryID , _ := o . fs . dirCache . FindPath ( ctx , o . remote , false )
2019-01-09 06:11:00 +01:00
trueDirID , drive , rootURL := parseNormalizedID ( directoryID )
2018-07-11 19:48:59 +02:00
if drive != "" {
opts = rest . Opts {
Method : "PATCH" ,
RootURL : rootURL ,
2020-01-14 18:33:35 +01:00
Path : "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon ( rest . URLPathEscape ( o . fs . opt . Enc . FromStandardName ( leaf ) ) ) ,
2018-07-11 19:48:59 +02:00
}
} else {
opts = rest . Opts {
Method : "PATCH" ,
2019-03-11 09:30:38 +01:00
Path : "/root:/" + withTrailingColon ( rest . URLPathEscape ( o . srvPath ( ) ) ) ,
2018-07-11 19:48:59 +02:00
}
2015-10-04 23:08:31 +02:00
}
update := api . SetFileSystemInfo {
FileSystemInfo : api . FileSystemInfoFacet {
CreatedDateTime : api . Timestamp ( modTime ) ,
LastModifiedDateTime : api . Timestamp ( modTime ) ,
} ,
}
var info * api . Item
err := o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err := o . fs . srv . CallJSON ( ctx , & opts , & update , & info )
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
return info , err
}
// SetModTime sets the modification time of the local fs object
2019-06-17 10:34:30 +02:00
func ( o * Object ) SetModTime ( ctx context . Context , modTime time . Time ) error {
info , err := o . setModTime ( ctx , modTime )
2015-10-04 23:08:31 +02:00
if err != nil {
2016-03-22 16:07:10 +01:00
return err
2015-10-04 23:08:31 +02:00
}
2017-03-06 21:11:54 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}
// Open an object for read
2019-06-17 10:34:30 +02:00
func ( o * Object ) Open ( ctx context . Context , options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
2015-10-04 23:08:31 +02:00
if o . id == "" {
2016-06-12 16:06:02 +02:00
return nil , errors . New ( "can't download - no id" )
2015-10-04 23:08:31 +02:00
}
2018-10-03 06:46:25 +02:00
if o . isOneNoteFile {
return nil , errors . New ( "can't open a OneNote file" )
}
2018-01-22 18:05:00 +01:00
fs . FixRangeOption ( options , o . size )
2015-10-04 23:08:31 +02:00
var resp * http . Response
2018-07-11 19:48:59 +02:00
opts := newOptsCall ( o . id , "GET" , "/content" )
opts . Options = options
2015-10-04 23:08:31 +02:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
2018-02-16 14:21:26 +01:00
if resp . StatusCode == http . StatusOK && resp . ContentLength > 0 && resp . Header . Get ( "Content-Range" ) == "" {
//Overwrite size with actual size since size readings from Onedrive is unreliable.
o . size = resp . ContentLength
}
2015-10-04 23:08:31 +02:00
return resp . Body , err
}
// createUploadSession creates an upload session for the object
2019-06-17 10:34:30 +02:00
func ( o * Object ) createUploadSession ( ctx context . Context , modTime time . Time ) ( response * api . CreateUploadResponse , err error ) {
leaf , directoryID , _ := o . fs . dirCache . FindPath ( ctx , o . remote , false )
2019-01-09 06:11:00 +01:00
id , drive , rootURL := parseNormalizedID ( directoryID )
2018-07-11 19:48:59 +02:00
var opts rest . Opts
if drive != "" {
opts = rest . Opts {
Method : "POST" ,
RootURL : rootURL ,
2018-11-02 13:14:19 +01:00
Path : fmt . Sprintf ( "/%s/items/%s:/%s:/createUploadSession" ,
2020-01-14 18:33:35 +01:00
drive , id , rest . URLPathEscape ( o . fs . opt . Enc . FromStandardName ( leaf ) ) ) ,
2018-07-11 19:48:59 +02:00
}
} else {
opts = rest . Opts {
Method : "POST" ,
2018-08-18 12:06:22 +02:00
Path : "/root:/" + rest . URLPathEscape ( o . srvPath ( ) ) + ":/createUploadSession" ,
2018-07-11 19:48:59 +02:00
}
2015-10-04 23:08:31 +02:00
}
2018-03-15 08:06:17 +01:00
createRequest := api . CreateUploadRequest { }
2018-03-16 20:18:51 +01:00
createRequest . Item . FileSystemInfo . CreatedDateTime = api . Timestamp ( modTime )
2018-03-15 08:06:17 +01:00
createRequest . Item . FileSystemInfo . LastModifiedDateTime = api . Timestamp ( modTime )
2015-10-04 23:08:31 +02:00
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , & createRequest , & response )
2018-10-03 06:46:25 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
2018-03-15 08:06:17 +01:00
return response , err
2015-10-04 23:08:31 +02:00
}
2020-01-25 11:41:20 +01:00
// getPosition gets the current position in a multipart upload
func ( o * Object ) getPosition ( ctx context . Context , url string ) ( pos int64 , err error ) {
2015-11-27 13:46:13 +01:00
opts := rest . Opts {
2020-01-25 11:41:20 +01:00
Method : "GET" ,
RootURL : url ,
}
var info api . UploadFragmentResponse
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
return shouldRetry ( resp , err )
} )
if err != nil {
return 0 , err
}
if len ( info . NextExpectedRanges ) != 1 {
return 0 , errors . Errorf ( "bad number of ranges in upload position: %v" , info . NextExpectedRanges )
}
position := info . NextExpectedRanges [ 0 ]
i := strings . IndexByte ( position , '-' )
if i < 0 {
return 0 , errors . Errorf ( "no '-' in next expected range: %q" , position )
2015-10-04 23:08:31 +02:00
}
2020-01-25 11:41:20 +01:00
position = position [ : i ]
pos , err = strconv . ParseInt ( position , 10 , 64 )
if err != nil {
return 0 , errors . Wrapf ( err , "bad expected range: %q" , position )
}
return pos , nil
}
// uploadFragment uploads a part
//
// It PUTs chunkSize bytes of chunk at offset start of a file of
// totalSize bytes to the upload session at url. If the server replies
// 416 (Requested Range Not Satisfiable) it reads the server's current
// position and retries sending only the bytes the server is missing.
// On the final fragment the server replies 200/201 with the new item
// metadata, which is decoded into info.
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
	// var response api.UploadFragmentResponse
	var resp *http.Response
	var body []byte
	// skip is the number of bytes of this chunk the server already has
	var skip = int64(0)
	err = o.fs.pacer.Call(func() (bool, error) {
		toSend := chunkSize - skip
		opts := rest.Opts{
			Method:        "PUT",
			RootURL:       url,
			ContentLength: &toSend,
			ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
			Body:          chunk,
			Options:       options,
		}
		// Position the chunk just after any bytes the server already has
		_, _ = chunk.Seek(skip, io.SeekStart)
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
			// 416 means our idea of the position differs from the
			// server's - ask the server where it has got to
			fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
			pos, posErr := o.getPosition(ctx, url)
			if posErr != nil {
				fs.Debugf(o, "Failed to read position: %v", posErr)
				return false, posErr
			}
			skip = pos - start
			fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
			switch {
			case skip < 0:
				return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
			case skip > chunkSize:
				return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
			case skip == chunkSize:
				fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
				return false, nil
			}
			// Retry, resending only the part the server is missing
			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
		}
		if err != nil {
			return shouldRetry(resp, err)
		}
		body, err = rest.ReadBody(resp)
		if err != nil {
			return shouldRetry(resp, err)
		}
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			// we are done :)
			// read the item
			info = &api.Item{}
			return false, json.Unmarshal(body, info)
		}
		return false, nil
	})
	return info, err
}
// cancelUploadSession cancels an upload session
2019-09-04 21:00:37 +02:00
func ( o * Object ) cancelUploadSession ( ctx context . Context , url string ) ( err error ) {
2015-11-27 13:46:13 +01:00
opts := rest . Opts {
2015-10-04 23:08:31 +02:00
Method : "DELETE" ,
2017-07-07 09:18:13 +02:00
RootURL : url ,
2015-10-04 23:08:31 +02:00
NoResponse : true ,
}
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2015-10-04 23:08:31 +02:00
return shouldRetry ( resp , err )
} )
return
}
// uploadMultipart uploads a file using multipart upload
//
// The file is uploaded through an upload session in chunks of
// o.fs.opt.ChunkSize. If rclone is interrupted, or an error occurs,
// the session is cancelled so the server discards partial data.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
	if size <= 0 {
		return nil, errors.New("unknown-sized upload not supported")
	}

	// The atexit handler blocks on uploadURLChan until the session URL
	// is known (or the channel is closed on session-creation failure),
	// so an interrupt can always cancel the upload cleanly.
	uploadURLChan := make(chan string, 1)
	gracefulCancel := func() {
		uploadURL, ok := <-uploadURLChan
		// Reading from uploadURLChan blocks the atexit process until
		// we are able to use uploadURL to cancel the upload
		if !ok { // createUploadSession failed - no need to cancel upload
			return
		}
		fs.Debugf(o, "Cancelling multipart upload")
		cancelErr := o.cancelUploadSession(ctx, uploadURL)
		if cancelErr != nil {
			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
		}
	}
	cancelFuncHandle := atexit.Register(gracefulCancel)

	// Create upload session
	fs.Debugf(o, "Starting multipart upload")
	session, err := o.createUploadSession(ctx, modTime)
	if err != nil {
		// Unblock (and unregister) the atexit handler - there is no
		// session to cancel
		close(uploadURLChan)
		atexit.Unregister(cancelFuncHandle)
		return nil, err
	}
	uploadURL := session.UploadURL
	uploadURLChan <- uploadURL

	// Cancel the session if the upload errors; always drop the atexit handler
	defer func() {
		if err != nil {
			fs.Debugf(o, "Error encountered during upload: %v", err)
			gracefulCancel()
		}
		atexit.Unregister(cancelFuncHandle)
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	for remaining > 0 {
		n := int64(o.fs.opt.ChunkSize)
		if remaining < n {
			n = remaining // final chunk may be short
		}
		// RepeatableReader allows uploadFragment to re-seek on retry
		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
		if err != nil {
			return nil, err
		}
		remaining -= n
		position += n
	}

	return info, nil
}
2018-09-04 17:57:47 +02:00
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
2020-03-21 23:31:51 +01:00
func ( o * Object ) uploadSinglepart ( ctx context . Context , in io . Reader , size int64 , modTime time . Time , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-09-04 18:37:52 +02:00
if size < 0 || size > int64 ( fs . SizeSuffix ( 4 * 1024 * 1024 ) ) {
2018-11-02 13:12:22 +01:00
return nil , errors . New ( "size passed into uploadSinglepart must be >= 0 and <= 4MiB" )
2018-09-04 17:57:47 +02:00
}
fs . Debugf ( o , "Starting singlepart upload" )
var resp * http . Response
var opts rest . Opts
2019-06-17 10:34:30 +02:00
leaf , directoryID , _ := o . fs . dirCache . FindPath ( ctx , o . remote , false )
2019-01-09 06:11:00 +01:00
trueDirID , drive , rootURL := parseNormalizedID ( directoryID )
2018-09-04 17:57:47 +02:00
if drive != "" {
opts = rest . Opts {
Method : "PUT" ,
RootURL : rootURL ,
2020-01-14 18:33:35 +01:00
Path : "/" + drive + "/items/" + trueDirID + ":/" + rest . URLPathEscape ( o . fs . opt . Enc . FromStandardName ( leaf ) ) + ":/content" ,
2018-09-04 17:57:47 +02:00
ContentLength : & size ,
Body : in ,
2020-03-21 23:31:51 +01:00
Options : options ,
2018-09-04 17:57:47 +02:00
}
} else {
opts = rest . Opts {
Method : "PUT" ,
Path : "/root:/" + rest . URLPathEscape ( o . srvPath ( ) ) + ":/content" ,
ContentLength : & size ,
Body : in ,
2020-03-21 23:31:51 +01:00
Options : options ,
2018-09-04 17:57:47 +02:00
}
}
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
2018-10-03 06:46:25 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2018-09-04 17:57:47 +02:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
err = o . setMetaData ( info )
if err != nil {
return nil , err
}
// Set the mod time now and read metadata
2019-06-17 10:34:30 +02:00
return o . setModTime ( ctx , modTime )
2018-09-04 17:57:47 +02:00
}
2015-10-04 23:08:31 +02:00
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
2019-06-17 10:34:30 +02:00
func ( o * Object ) Update ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( err error ) {
2018-10-03 06:46:25 +02:00
if o . hasMetaData && o . isOneNoteFile {
return errors . New ( "can't upload content to a OneNote file" )
}
2017-01-29 21:42:43 +01:00
o . fs . tokenRenewer . Start ( )
defer o . fs . tokenRenewer . Stop ( )
2016-02-18 12:35:25 +01:00
size := src . Size ( )
2019-06-17 10:34:30 +02:00
modTime := src . ModTime ( ctx )
2016-02-18 12:35:25 +01:00
2018-09-04 17:57:47 +02:00
var info * api . Item
if size > 0 {
2020-03-21 23:31:51 +01:00
info , err = o . uploadMultipart ( ctx , in , size , modTime , options ... )
2018-09-04 17:57:47 +02:00
} else if size == 0 {
2020-03-21 23:31:51 +01:00
info , err = o . uploadSinglepart ( ctx , in , size , modTime , options ... )
2018-09-04 17:57:47 +02:00
} else {
2019-02-02 09:37:33 +01:00
return errors . New ( "unknown-sized upload not supported" )
2018-09-04 17:57:47 +02:00
}
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2018-09-04 17:57:47 +02:00
2018-03-15 08:06:17 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// Remove an object
//
// Deletes the object on the remote by its ID.
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteObject(ctx, o.id)
}
2016-09-21 23:13:24 +02:00
// MimeType of an Object if known, "" otherwise
//
// The value is read from the item metadata when the object is created.
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
2018-05-13 10:16:56 +02:00
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
2019-01-09 06:11:00 +01:00
func newOptsCall ( normalizedID string , method string , route string ) ( opts rest . Opts ) {
id , drive , rootURL := parseNormalizedID ( normalizedID )
2018-07-11 19:48:59 +02:00
if drive != "" {
return rest . Opts {
Method : method ,
RootURL : rootURL ,
Path : "/" + drive + "/items/" + id + route ,
}
}
return rest . Opts {
Method : method ,
Path : "/items/" + id + route ,
}
}
2019-01-09 06:11:00 +01:00
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID ( ID string ) ( string , string , string ) {
2018-07-11 19:48:59 +02:00
if strings . Index ( ID , "#" ) >= 0 {
s := strings . Split ( ID , "#" )
2018-08-18 12:06:22 +02:00
return s [ 1 ] , s [ 0 ] , graphURL + "/drives"
2018-07-11 19:48:59 +02:00
}
return ID , "" , ""
}
2019-01-09 06:11:00 +01:00
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}
	// target equal to base is inside it with an empty relative path.
	// The previous implementation matched this case via the trailing
	// slash trick below but then panicked slicing past the end of target.
	if target == base {
		return "", true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target, baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}
2019-03-11 09:30:38 +01:00
// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
	switch {
	case remotePath == "":
		return ""
	case strings.HasSuffix(remotePath, "/"):
		return strings.TrimSuffix(remotePath, "/") + ":/"
	default:
		return remotePath + ":"
	}
}
2015-10-04 23:08:31 +02:00
// Check the interfaces are satisfied
var (
2018-08-19 17:22:51 +02:00
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
_ fs . Copier = ( * Fs ) ( nil )
_ fs . Mover = ( * Fs ) ( nil )
_ fs . DirMover = ( * Fs ) ( nil )
2016-12-09 16:39:29 +01:00
_ fs . DirCacheFlusher = ( * Fs ) ( nil )
2018-04-16 23:19:25 +02:00
_ fs . Abouter = ( * Fs ) ( nil )
2018-10-09 14:11:48 +02:00
_ fs . PublicLinker = ( * Fs ) ( nil )
2016-12-09 16:39:29 +01:00
_ fs . Object = ( * Object ) ( nil )
_ fs . MimeTyper = & Object { }
2018-05-13 10:16:56 +02:00
_ fs . IDer = & Object { }
2015-10-04 23:08:31 +02:00
)