
Add a native backend for oracle object storage - fixes #6299

This commit is contained in:
Manoj Ghosh 2022-10-13 05:04:56 -07:00 committed by GitHub
parent 90d23139f6
commit b16e50851a
15 changed files with 3022 additions and 9 deletions


@@ -62,6 +62,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)


@@ -34,6 +34,7 @@ import (
_ "github.com/rclone/rclone/backend/netstorage" _ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"


@@ -0,0 +1,158 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/rsa"
"errors"
"net/http"
"os"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
)
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
}
return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:
fs.Infof("client", "using no auth provider")
return getNoAuthConfiguration()
default:
}
return common.DefaultConfigProvider(), nil
}
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
p, err := getConfigurationProvider(opt)
if err != nil {
return nil, err
}
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
if err != nil {
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
return nil, err
}
if opt.Region != "" {
client.SetRegion(opt.Region)
}
modifyClient(ctx, opt, &client.BaseClient)
return &client, err
}
func fileExists(filePath string) bool {
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
return false
}
return true
}
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
client.HTTPClient = getHTTPClient(ctx)
if opt.Provider == noAuth {
client.Signer = getNoAuthSigner()
}
}
// getHTTPClient makes an http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
return fshttp.NewClient(ctx)
}
var retryErrorCodes = []int{
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is an ocierr object, try and extract more useful information to determine if we should retry
if ociError, ok := err.(common.ServiceError); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(err) {
return true, err
}
// If it is a timeout then we want to retry that
if ociError.GetCode() == "RequestTimeout" {
return true, err
}
}
// Ok, not an oci error, check for generic failure conditions
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
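// Usage sketch (illustrative, not part of this commit's API): shouldRetry
// is designed to be returned from a pacer callback, so a typical OCI SDK
// call elsewhere in this backend looks like this, where f is the *Fs
// defined in this change:
//
// err := f.pacer.Call(func() (bool, error) {
// resp, err := f.srv.GetNamespace(ctx, objectstorage.GetNamespaceRequest{})
// return shouldRetry(ctx, resp.HTTPResponse(), err)
// })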
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
return &noAuthConfigurator{}, nil
}
func getNoAuthSigner() common.HTTPRequestSigner {
return &noAuthSigner{}
}
type noAuthConfigurator struct {
}
type noAuthSigner struct {
}
func (n *noAuthSigner) Sign(*http.Request) error {
return nil
}
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
return nil, nil
}
func (n *noAuthConfigurator) KeyID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) UserOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) Region() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
return common.AuthConfig{
AuthType: common.UnknownAuthenticationType,
IsFromConfigFile: false,
OboToken: nil,
}, nil
}
// Check the interfaces are satisfied
var (
_ common.ConfigurationProvider = &noAuthConfigurator{}
_ common.HTTPRequestSigner = &noAuthSigner{}
)


@@ -0,0 +1,228 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------
const (
operationRename = "rename"
operationListMultiPart = "list-multipart-uploads"
operationCleanup = "cleanup"
)
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object",
Long: `This command can be used to rename an object.
Usage Examples:
rclone backend rename oss:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oss:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
}
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use -i/--dry-run with this command to see what it
would do.
rclone backend cleanup oss:bucket/path/to/object
rclone backend cleanup -o max-age=7w oss:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
},
}
/*
Command the backend to run a named command
The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from
The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
if len(args) < 2 {
return nil, fmt.Errorf("path to object or its new name to rename is empty")
}
remote := args[0]
newName := args[1]
return f.rename(ctx, remote, newName)
case operationListMultiPart:
return f.listMultipartUploadsAll(ctx)
case operationCleanup:
maxAge := 24 * time.Hour
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
default:
return nil, fs.ErrorCommandNotFound
}
}
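// Illustrative dispatch (assumes an existing *Fs f): the CLI invocation
// "rclone backend cleanup -o max-age=7w oos:bucket" reduces to a call like:
//
// _, err := f.Command(ctx, operationCleanup, nil, map[string]string{"max-age": "7w"})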
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
if newName == "" {
return nil, fmt.Errorf("the object's new name cannot be empty")
}
o := &Object{
fs: f,
remote: remote,
}
bucketName, objectPath := o.split()
err := o.readMetaData(ctx)
if err != nil {
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
if strings.HasPrefix(objectPath, bucketName) {
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
objectPath, bucketName)
}
return nil, fs.ErrorNotAFile
}
details := objectstorage.RenameObjectDetails{
SourceName: common.String(objectPath),
NewName: common.String(newName),
}
request := objectstorage.RenameObjectRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
RenameObjectDetails: details,
OpcClientRequestId: nil,
RequestMetadata: common.RequestMetadata{},
}
var response objectstorage.RenameObjectResponse
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.RenameObject(ctx, request)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
return "renamed successfully", nil
}
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
err error) {
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
bucket, directory := f.split("")
if bucket != "" {
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
if err != nil {
return uploadsMap, err
}
uploadsMap[bucket] = uploads
return uploadsMap, nil
}
entries, err := f.listBuckets(ctx)
if err != nil {
return uploadsMap, err
}
for _, entry := range entries {
bucket := entry.Remote()
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
if listErr != nil {
err = listErr
fs.Errorf(f, "%v", err)
}
uploadsMap[bucket] = uploads
}
return uploadsMap, err
}
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
var response objectstorage.ListMultipartUploadsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploads(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
// fs.Debugf(f, "failed to list multi part uploads %v", err)
return uploads, err
}
for index, item := range response.Items {
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploads, nil
}


@@ -0,0 +1,155 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Implement Copier, an optional interface for Fs
// ------------------------------------------------------------
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj)
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// copy does a server-side copy from dstObj <- srcObj
//
// The source object's metadata is carried over to the destination
// (with the opc-meta- prefix applied to each key)
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
srcBucket, srcPath := srcObj.split()
dstBucket, dstPath := dstObj.split()
if dstBucket != srcBucket {
exists, err := f.bucketExists(ctx, dstBucket)
if err != nil {
return err
}
if !exists {
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
}
}
copyObjectDetails := objectstorage.CopyObjectDetails{
SourceObjectName: common.String(srcPath),
DestinationRegion: common.String(dstObj.fs.opt.Region),
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
DestinationBucket: common.String(dstBucket),
DestinationObjectName: common.String(dstPath),
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
}
req := objectstorage.CopyObjectRequest{
NamespaceName: common.String(srcObj.fs.opt.Namespace),
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
workRequestID := resp.OpcWorkRequestId
timeout := time.Duration(f.opt.CopyTimeout)
dstName := dstObj.String()
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
// To enable server side copy object, customers will have to
// grant policy to objectstorage service to manage object-family
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
// Another option to avoid the policy is to download and reupload the file;
// that approach works only up to the 5 GiB single-upload size limit.
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
if err != nil {
return err
}
return err
}
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
client *objectstorage.ObjectStorageClient) error {
stateConf := &StateChangeConf{
Pending: []string{
string(objectstorage.WorkRequestStatusAccepted),
string(objectstorage.WorkRequestStatusInProgress),
string(objectstorage.WorkRequestStatusCanceling),
},
Target: []string{
string(objectstorage.WorkRequestSummaryStatusCompleted),
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
wr := &workRequestResponse.WorkRequest
return workRequestResponse, string(wr.Status), err
},
Timeout: timeout,
}
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
if e != nil {
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
}
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
if wr.Status == objectstorage.WorkRequestStatusFailed {
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
}
return nil
}
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
req := objectstorage.ListWorkRequestErrorsRequest{}
req.WorkRequestId = workRequestID
res, err := client.ListWorkRequestErrors(ctx, req)
if err != nil {
return "", err
}
allErrs := make([]string, 0)
for _, errs := range res.Items {
allErrs = append(allErrs, *errs.Message)
}
errorMessage := strings.Join(allErrs, "\n")
return errorMessage, nil
}


@@ -0,0 +1,621 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
// Object Interface Implementation
// ------------------------------------------------------------
const (
metaMtime = "mtime" // the meta key to store mtime in - e.g. opc-meta-mtime
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
// prefix OCI prepends to user metadata keys
ociMetaPrefix = "opc-meta-"
)
var archive = "archive"
var infrequentAccess = "infrequentaccess"
var standard = "standard"
var storageTierMap = map[string]*string{
archive: &archive,
infrequentAccess: &infrequentAccess,
standard: &standard,
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Object describes an OCI bucket object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
md5 string // MD5 hash if known
bytes int64 // Size of the object
lastModified time.Time // The modified time of the object if known
meta map[string]string // The object metadata if known - may be nil
mimeType string // Content-Type of the object
// Metadata as pointers to strings as they often won't be present
storageTier *string // e.g. Standard
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
fs.Debugf(o, "trying to read metadata %v", o.remote)
if o.meta != nil {
return nil
}
info, err := o.headObject(ctx)
if err != nil {
return err
}
return o.decodeMetaDataHead(info)
}
// headObject gets the metadata from the object unconditionally
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
bucketName, objectPath := o.split()
req := objectstorage.HeadObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(objectPath),
}
var response objectstorage.HeadObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
var err error
response, err = o.fs.srv.HeadObject(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
if svcErr, ok := err.(common.ServiceError); ok {
if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
return nil, err
}
o.fs.cache.MarkOK(bucketName)
return &response, err
}
func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) setMetaData(
contentLength *int64,
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
storageTier interface{},
meta map[string]string) error {
if contentLength != nil {
o.bytes = *contentLength
}
if contentMd5 != nil {
md5, err := o.base64ToMd5(*contentMd5)
if err == nil {
o.md5 = md5
}
}
o.meta = meta
if o.meta == nil {
o.meta = map[string]string{}
}
// Read MD5 from metadata if present
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
md5, err := o.base64ToMd5(md5sumBase64)
if err == nil {
o.md5 = md5
}
}
if lastModified == nil {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
o.lastModified = lastModified.Time
}
if contentType != nil {
o.mimeType = *contentType
}
if storageTier == nil || storageTier == "" {
o.storageTier = storageTierMap[standard]
} else {
tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
o.storageTier = storageTierMap[tier]
}
return nil
}
func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
if err != nil {
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
return "", err
} else if len(md5sumBytes) != 16 {
fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
}
return hex.EncodeToString(md5sumBytes), nil
}
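// Worked example (illustrative values): the base64 Content-MD5 of an empty
// object, "1B2M2Y8AsgTpgAmY7PhCfg==", decodes to 16 bytes whose hex form is
// "d41d8cd98f00b204e9800998ecf8427e" - the familiar md5 of the empty string.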
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// GetTier returns storage class as string
func (o *Object) GetTier() string {
if o.storageTier == nil || *o.storageTier == "" {
return standard
}
return *o.storageTier
}
// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
ctx := context.TODO()
tier = strings.ToLower(tier)
bucketName, bucketPath := o.split()
tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
if !ok {
return fmt.Errorf("not a valid storage tier %v ", tier)
}
req := objectstorage.UpdateObjectStorageTierRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
ObjectName: common.String(bucketPath),
StorageTier: tierEnum,
},
}
_, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
if err != nil {
return err
}
o.storageTier = storageTierMap[tier]
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
// Convert base64 encoded md5 into lower case hex
if o.md5 == "" {
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
}
return o.md5, nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime, and if that isn't present, falls
// back to the LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == "" {
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
if err != nil {
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
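// Illustrative metadata value: the mtime is stored as a float string of
// epoch seconds, e.g. o.meta["mtime"] = "1664478913.042", which
// swift.FloatStringToTime parses back into a time.Time with sub-second
// precision.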
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
if err != nil {
return err
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.fs.Copy(ctx, o, o.remote)
return err
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucketName, bucketPath := o.split()
req := objectstorage.DeleteObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.DeleteObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// Open object file
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketName, bucketPath := o.split()
req := objectstorage.GetObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
o.applyGetObjectOptions(&req, options...)
var resp objectstorage.GetObjectResponse
err := o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.srv.GetObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
// read size from ContentLength or ContentRange
bytes := resp.ContentLength
if resp.ContentRange != nil {
var contentRange = *resp.ContentRange
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
bytes = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
err = o.decodeMetaDataObject(&resp)
if err != nil {
return nil, err
}
o.bytes = *bytes
return resp.HTTPResponse().Body, nil
}
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
}
// determine whether to do a single or multipart upload
size := src.Size()
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true), // sic: field spelled this way in the OCI SDK
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.PutObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
fs.Errorf(o, "put object failed %v", err)
return err
}
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
return o.readMetaData(ctx)
}
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.CacheControl = common.String(value)
case "content-disposition":
req.ContentDisposition = common.String(value)
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.OpcMeta[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.HttpResponseCacheControl = common.String(value)
case "content-disposition":
req.HttpResponseContentDisposition = common.String(value)
case "content-encoding":
req.HttpResponseContentEncoding = common.String(value)
case "content-language":
req.HttpResponseContentLanguage = common.String(value)
case "content-type":
req.HttpResponseContentType = common.String(value)
case "range":
// do nothing
default:
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
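// metadataWithOpcPrefix copies src, adding the opc-meta- prefix to each key;
// e.g. {"mtime": "1664478913.042"} becomes {"opc-meta-mtime": "1664478913.042"}
// (illustrative value). Keys already carrying the prefix are skipped rather
// than double-prefixed.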
func metadataWithOpcPrefix(src map[string]string) map[string]string {
dst := make(map[string]string)
for lowerKey, value := range src {
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
dst[ociMetaPrefix+lowerKey] = value
}
}
return dst
}


@@ -0,0 +1,229 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
const (
maxSizeForCopy = 4768 * 1024 * 1024
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 100 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)
const (
userPrincipal = "user_principal_auth"
instancePrincipal = "instance_principal_auth"
resourcePrincipal = "resource_principal_auth"
environmentAuth = "env_auth"
noAuth = "no_auth"
userPrincipalHelpText = `use an OCI user and an API key for authentication.
you'll need to put in a config file your tenancy OCID, user OCID, region, and the path to and fingerprint of an API key.
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
resourcePrincipalHelpText = `use resource principals to make API calls`
environmentAuthHelpText = `automatically pick up the credentials from the runtime (env), first one to provide auth wins`
noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
)
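// Illustrative rclone.conf stanza for the user_principal_auth provider
// (every value below is a placeholder, not a default):
//
// [oos]
// type = oracleobjectstorage
// provider = user_principal_auth
// namespace = example-namespace
// compartment = ocid1.compartment.oc1..exampleuniqueid
// region = us-ashburn-1
// config_file = ~/.oci/config
// config_profile = Default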
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
Compartment string `config:"compartment"`
Namespace string `config:"namespace"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
ConfigFile string `config:"config_file"`
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
}
func newOptions() []fs.Option {
return []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your Auth Provider",
Required: true,
Default: environmentAuth,
Examples: []fs.OptionExample{{
Value: environmentAuth,
Help: environmentAuthHelpText,
}, {
Value: userPrincipal,
Help: userPrincipalHelpText,
}, {
Value: instancePrincipal,
Help: instancePrincipalHelpText,
}, {
Value: resourcePrincipal,
Help: resourcePrincipalHelpText,
}, {
Value: noAuth,
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
}, {
Name: "region",
Help: "Object storage Region",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
Required: false,
}, {
Name: "config_file",
Help: "Path to OCI config file",
Provider: userPrincipal,
Default: "~/.oci/config",
Examples: []fs.OptionExample{{
Value: "~/.oci/config",
Help: "oci configuration file location",
}},
}, {
Name: "config_profile",
Help: "Profile name inside the oci config file",
Provider: userPrincipal,
Default: "Default",
Examples: []fs.OptionExample{{
Value: "Default",
Help: "Use the default profile",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.
Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: defaultUploadConcurrency,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "copy_timeout",
Help: `Timeout for copy.
Copy is an asynchronous operation, specify timeout to wait for copy to succeed
`,
Default: defaultCopyTimeoutDuration,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Any UTF-8 character is valid in a key, however it can't handle
// invalid UTF-8 and / have a special meaning.
//
// The SDK can't seem to handle uploading files called '.'
// - initial / encoding
// - doubled / encoding
// - trailing / encoding
// so that OSS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}}
}
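// Worked example for the chunk_size help above (assuming the usual 10,000
// part limit per multipart upload): with the default 5 MiB chunks the
// largest file that can be streamed with unknown size is
//
// const maxParts = 10000
// maxStream := int64(minChunkSize) * maxParts // 52,428,800,000 bytes ≈ 48.8 GiB
//
// which is the "48 GiB" quoted in the help text.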


@@ -0,0 +1,691 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage
import (
"context"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "oracleobjectstorage",
Description: "Oracle Cloud Infrastructure Object Storage",
Prefix: "oos",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
})
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *objectstorage.ObjectStorageClient // the connection to the object storage
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
}
// NewFs Initialize backend
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
objectStorageClient, err := newObjectStorageClient(ctx, opt)
if err != nil {
return nil, err
}
p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
f := &Fs{
name: name,
opt: *opt,
ci: ci,
srv: objectStorageClient,
cache: bucket.NewCache(),
pacer: fs.NewPacer(ctx, p),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with fs which points to the parent
return f, fs.ErrorIsFile
}
return f, err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// ------------------------------------------------------------
// Implement the backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return "oss:root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("oss:bucket %s", f.rootBucket)
}
return fmt.Sprintf("oss:bucket %s, path %s", f.rootBucket, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
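// For example (hypothetical remote): with f.root = "mybucket/backup",
// f.split("photos/a.jpg") returns ("mybucket", "backup/photos/a.jpg")
// after the configured encoding is applied.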
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
// list the objects into the function supplied from
// the bucket and root supplied
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
// If hidden is set then it will list the hidden (deleted) files too.
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
chunkSize := 1000
if limit > 0 {
chunkSize = limit
}
var request = objectstorage.ListObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucket),
Prefix: common.String(directory),
Limit: common.Int(chunkSize),
Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
}
if delimiter != "" {
request.Delimiter = common.String(delimiter)
}
for {
var resp objectstorage.ListObjectsResponse
err = f.pacer.Call(func() (bool, error) {
var err error
resp, err = f.srv.ListObjects(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetHTTPStatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
if f.rootBucket == "" {
// if listing from the root ignore wrong region requests returning
// empty directory
if reqErr, ok := err.(common.ServiceError); ok {
// 301 if wrong region for bucket
if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
return nil
}
}
}
return err
}
if !recurse {
for _, commonPrefix := range resp.ListObjects.Prefixes {
if commonPrefix == "" {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := commonPrefix
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
if err != nil {
return err
}
}
}
for i := range resp.Objects {
object := &resp.Objects[i]
// Finish if file name no longer has prefix
//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
//}
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
continue // skip directory marker
}
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
err = fn(remote, object, isDirectory)
if err != nil {
return err
}
}
// end if no NextStartWith
if resp.NextStartWith == nil {
break
}
request.Start = resp.NextStartWith
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
}
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
if f.opt.Provider == noAuth {
return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
}
var request = objectstorage.ListBucketsRequest{
NamespaceName: common.String(f.opt.Namespace),
CompartmentId: common.String(f.opt.Compartment),
}
var resp objectstorage.ListBucketsResponse
for {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.ListBuckets(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
for _, item := range resp.Items {
bucketName := f.opt.Enc.ToStandardName(*item.Name)
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, item.TimeCreated.Time)
entries = append(entries, d)
}
if resp.OpcNextPage == nil {
break
}
request.Page = resp.OpcNextPage
}
return entries, nil
}
// Return an Object from a path
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.TimeModified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = info.TimeModified.Time
}
if info.Md5 != nil {
md5, err := o.base64ToMd5(*info.Md5)
if err == nil {
o.md5 = md5
}
}
o.bytes = *info.Size
o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// Put the object into the bucket
// Copy the reader in to the new object which is returned
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucketName, _ := f.split(dir)
return f.makeBucket(ctx, bucketName)
}
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
return f.cache.Create(bucketName, func() error {
details := objectstorage.CreateBucketDetails{
Name: common.String(bucketName),
CompartmentId: common.String(f.opt.Compartment),
PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
}
req := objectstorage.CreateBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
CreateBucketDetails: details,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CreateBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
}
if svcErr, ok := err.(common.ServiceError); ok {
if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
err = nil
}
}
return err
}, func() (bool, error) {
return f.bucketExists(ctx, bucketName)
})
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
req := objectstorage.HeadBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.HeadBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
return true, nil
}
if err, ok := err.(common.ServiceError); ok {
if err.GetHTTPStatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucketName, directory := f.split(dir)
if bucketName == "" || directory != "" {
return nil
}
return f.cache.Remove(bucketName, func() error {
req := objectstorage.DeleteBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.DeleteBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucketName)
}
return err
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
uploads []*objectstorage.MultipartUpload) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
for _, upload := range uploads {
if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
age := time.Since(upload.TimeCreated.Time)
what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
upload.TimeCreated, age)
if age > maxAge {
fs.Infof(f, "removing %s", what)
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
if ignoreErr != nil {
fs.Debugf(f, "ignoring error %s", ignoreErr)
}
} else {
fs.Debugf(f, "ignoring %s", what)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
}
}
return err
}
// cleanUp removes all pending multipart uploads older than maxAge
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
uploadsMap, err := f.listMultipartUploadsAll(ctx)
if err != nil {
return err
}
for bucketName, uploads := range uploadsMap {
cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
err = cleanErr
}
}
return err
}
// CleanUp removes all pending multipart uploads older than 24 hours
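//
// This is the hook behind "rclone cleanup remote:"; a configurable age is
// exposed through the backend "cleanup" command.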
func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.cleanUp(ctx, 24*time.Hour)
}
// ------------------------------------------------------------
// Implement ListRer, an optional interface for Fs
// ------------------------------------------------------------
/*
ListR lists the objects and directories of the Fs starting
from dir recursively into out.
dir should be "" to start from the root, and should not
have trailing slashes.
This should return ErrDirNotFound if the directory isn't
found.
It should call callback for each tranche of entries read.
These need not be returned in any particular order. If
callback returns an error then the listing will stop
immediately.
Don't implement this unless you have a more efficient way
of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucketName == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucketName := entry.Remote()
err = listR(bucketName, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
} else {
err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
return list.Flush()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
@ -0,0 +1,33 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOracleObjectStorage:",
TiersToTest: []string{"standard", "archive"},
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
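// To run these against a real remote, configure a remote named
// TestOracleObjectStorage as described in the docs, then invoke the tests
// from this package in the usual rclone fashion, e.g.
//
//	go test -v -remote TestOracleObjectStorage: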
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
@ -0,0 +1,7 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
package oracleobjectstorage
@ -0,0 +1,362 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
)
var refreshGracePeriod = 30 * time.Second
// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example a compute instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
Delay time.Duration // Wait this time before starting checks
Pending []string // States that are "allowed" and will continue trying
Refresh StateRefreshFunc // Refreshes the current state
Target []string // Target state
Timeout time.Duration // The amount of time to wait before timeout
MinTimeout time.Duration // Smallest time to wait before refreshes
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
NotFoundChecks int // Number of times to allow not found (nil result from Refresh)
// This is to work around inconsistent APIs
ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}
// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
targetOccurrence := 0
// Set a default for times to check for not found
if conf.NotFoundChecks == 0 {
conf.NotFoundChecks = 20
}
if conf.ContinuousTargetOccurrence == 0 {
conf.ContinuousTargetOccurrence = 1
}
type Result struct {
Result interface{}
State string
Error error
Done bool
}
// Read every result from the refresh loop, waiting for a positive result.Done.
resCh := make(chan Result, 1)
// cancellation channel for the refresh loop
cancelCh := make(chan struct{})
result := Result{}
go func() {
defer close(resCh)
select {
case <-time.After(conf.Delay):
case <-cancelCh:
return
}
// start with 0 delay for the first loop
var wait time.Duration
for {
// store the last result
resCh <- result
// wait and watch for cancellation
select {
case <-cancelCh:
return
case <-time.After(wait):
// first round had no wait
if wait == 0 {
wait = 100 * time.Millisecond
}
}
res, currentState, err := conf.Refresh()
result = Result{
Result: res,
State: currentState,
Error: err,
}
if err != nil {
resCh <- result
return
}
// If we're waiting for the absence of a thing, then return
if res == nil && len(conf.Target) == 0 {
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
if res == nil {
// If we didn't find the resource, check if we have been
// not finding it for a while, and if so, report an error.
notfoundTick++
if notfoundTick > conf.NotFoundChecks {
result.Error = &NotFoundError{
LastError: err,
Retries: notfoundTick,
}
resCh <- result
return
}
} else {
// Reset the counter for when a resource isn't found
notfoundTick = 0
found := false
for _, allowed := range conf.Target {
if currentState == allowed {
found = true
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
}
for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurrence = 0
break
}
}
if !found && len(conf.Pending) > 0 {
result.Error = &UnexpectedStateError{
LastError: err,
State: result.State,
ExpectedState: conf.Target,
}
resCh <- result
return
}
}
// Wait between refreshes using exponential backoff, except when
// waiting for the target state to reoccur.
if targetOccurrence == 0 {
wait *= 2
}
// If a poll interval has been specified, choose that interval.
// Otherwise, bound the default value.
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
wait = conf.PollInterval
} else {
if wait < conf.MinTimeout {
wait = conf.MinTimeout
} else if wait > 10*time.Second {
wait = 10 * time.Second
}
}
// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
}
}()
// store the last value result from the refresh loop
lastResult := Result{}
timeout := time.After(conf.Timeout)
for {
select {
case r, ok := <-resCh:
// channel closed, so return the last result
if !ok {
return lastResult.Result, lastResult.Error
}
// we reached the intended state
if r.Done {
return r.Result, r.Error
}
// still waiting, store the last result
lastResult = r
case <-ctx.Done():
close(cancelCh)
return nil, ctx.Err()
case <-timeout:
// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
// cancel the goroutine and start our grace period timer
close(cancelCh)
timeout := time.After(refreshGracePeriod)
// we need a for loop and a label to break on, because we may have
// an extra response value to read, but still want to wait for the
// channel to close.
forSelect:
for {
select {
case r, ok := <-resCh:
if r.Done {
// the last refresh loop reached the desired state
return r.Result, r.Error
}
if !ok {
// the goroutine returned
break forSelect
}
// target state not reached, save the result for the
// TimeoutError and wait for the channel to close
lastResult = r
case <-ctx.Done():
fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
break forSelect
case <-timeout:
fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
break forSelect
}
}
return nil, &TimeoutError{
LastError: lastResult.Error,
LastState: lastResult.State,
Timeout: conf.Timeout,
ExpectedState: conf.Target,
}
}
}
}
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
LastRequest interface{}
LastResponse interface{}
Message string
Retries int
}
func (e *NotFoundError) Error() string {
if e.Message != "" {
return e.Message
}
if e.Retries > 0 {
return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
}
return "couldn't find resource"
}
func (e *NotFoundError) Unwrap() error {
return e.LastError
}
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
LastError error
State string
ExpectedState []string
}
func (e *UnexpectedStateError) Error() string {
return fmt.Sprintf(
"unexpected state '%s', wanted target '%s'. last error: %s",
e.State,
strings.Join(e.ExpectedState, ", "),
e.LastError,
)
}
func (e *UnexpectedStateError) Unwrap() error {
return e.LastError
}
// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
LastError error
LastState string
Timeout time.Duration
ExpectedState []string
}
func (e *TimeoutError) Error() string {
expectedState := "resource to be gone"
if len(e.ExpectedState) > 0 {
expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
}
extraInfo := make([]string, 0)
if e.LastState != "" {
extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
}
if e.Timeout > 0 {
extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
}
suffix := ""
if len(extraInfo) > 0 {
suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
}
if e.LastError != nil {
return fmt.Sprintf("timeout while waiting for %s%s: %s",
expectedState, suffix, e.LastError)
}
return fmt.Sprintf("timeout while waiting for %s%s",
expectedState, suffix)
}
func (e *TimeoutError) Unwrap() error {
return e.LastError
}
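// Illustrative sketch (not part of the backend): how StateChangeConf can be
// used to poll an asynchronous operation. The refresh callback is a
// hypothetical helper supplied by the caller, and the states and timings
// here are assumptions for the example only.
func waitForCopyExample(ctx context.Context, refresh StateRefreshFunc) (interface{}, error) {
	stateConf := &StateChangeConf{
		Delay:      2 * time.Second,                     // let the service register the request first
		Pending:    []string{"ACCEPTED", "IN_PROGRESS"}, // states that keep the loop polling
		Target:     []string{"COMPLETED"},               // state that ends the wait successfully
		Refresh:    refresh,                             // must return a non-nil result once the entity exists
		Timeout:    10 * time.Minute,                    // after this a *TimeoutError is returned
		MinTimeout: 2 * time.Second,                     // lower bound for the exponential backoff
	}
	return stateConf.WaitForStateContext(ctx, "copy")
}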
@ -0,0 +1,515 @@
---
title: "Oracle Object Storage"
description: "Rclone docs for Oracle Object Storage"
---
# {{< icon "fa-light fa-cloud" >}} Oracle Object Storage
[Oracle Object Storage Overview](https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/objectstorageoverview.htm)
[Oracle Object Storage FAQ](https://www.oracle.com/cloud/storage/object-storage/faq/)
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command). You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
## Configuration
Here is an example of making an Oracle Object Storage configuration. `rclone config` walks you
through it.
To create a remote called `remote`, first run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n
Enter name for new remote.
name> remote
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Oracle Cloud Infrastructure Object Storage
\ (oracleobjectstorage)
Storage> oracleobjectstorage
Option provider.
Choose your Auth Provider
Choose a number from below, or type in your own string value.
Press Enter for the default (env_auth).
1 / automatically pick up the credentials from runtime (env), first one to provide auth wins
\ (env_auth)
/ use an OCI user and an API key for authentication.
2 | you'll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
| https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
\ (user_principal_auth)
/ use instance principals to authorize an instance to make API calls.
3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
| https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
\ (instance_principal_auth)
4 / use resource principals to make API calls
\ (resource_principal_auth)
5 / no credentials needed, this is typically for reading public buckets
\ (no_auth)
provider> 2
Option namespace.
Object storage namespace
Enter a value.
namespace> idbamagbg734
Option compartment.
Object storage compartment OCID
Enter a value.
compartment> ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
Option region.
Object storage Region
Enter a value.
region> us-ashburn-1
Option endpoint.
Endpoint for Object storage API.
Leave blank to use the default endpoint for the region.
Enter a value. Press Enter to leave empty.
endpoint>
Option config_file.
Path to OCI config file
Choose a number from below, or type in your own string value.
Press Enter for the default (~/.oci/config).
1 / oci configuration file location
\ (~/.oci/config)
config_file> /etc/oci/dev.conf
Option config_profile.
Profile name inside OCI config file
Choose a number from below, or type in your own string value.
Press Enter for the default (Default).
1 / Use the default profile
\ (Default)
config_profile> Test
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: oracleobjectstorage
- namespace: idbamagbg734
- compartment: ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
- region: us-ashburn-1
- provider: user_principal_auth
- oci_config_file: /etc/oci/dev.conf
- oci_config_profile: Test
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
See all buckets
rclone lsd remote:
Create a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
rclone ls remote:bucket --max-depth 1
### Modified time
The modified time is stored as metadata on the object under the
`opc-meta-mtime` key, as a floating point number of seconds since the epoch, accurate to 1 ns.
If the modification time needs to be updated rclone will attempt to perform a server
side copy to update the modification time, provided the object can be copied in a single part.
If the object is larger than 5 GiB, it will be uploaded again rather than copied.
Note that reading this from the object takes an additional `HEAD` request as the metadata
isn't returned in object listings.
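As a rough sketch of the encoding (an illustration of the format, not rclone's
exact internal helper), the value is the Unix time with the nanoseconds after
the decimal point:
```
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2022, time.October, 13, 5, 4, 56, 123456789, time.UTC)
	// Seconds since the epoch with nanosecond precision, stored under
	// the opc-meta-mtime key, e.g. "1665637496.123456789".
	fmt.Printf("opc-meta-mtime: %d.%09d\n", t.Unix(), t.Nanosecond())
}
```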
### Multipart uploads
rclone supports multipart uploads with OOS which means that it can
upload files bigger than 5 GiB.
Note that files uploaded *both* with multipart upload *and* through
crypt remotes do not have MD5 sums.
rclone switches from single part uploads to multipart uploads at the
point specified by `--oos-upload-cutoff`. This can be a maximum of 5 GiB
and a minimum of 0 (i.e. always upload multipart files).
The chunk sizes used in the multipart upload are specified by
`--oos-chunk-size` and the number of chunks uploaded concurrently is
specified by `--oos-upload-concurrency`.
Multipart uploads will use `--transfers` * `--oos-upload-concurrency` *
`--oos-chunk-size` extra memory. Single part uploads do not use extra
memory.
Single part transfers can be faster than multipart transfers or slower
depending on your latency from OOS - the more latency, the more likely
single part transfers will be faster.
Increasing `--oos-upload-concurrency` will increase throughput (8 would
be a sensible value) and increasing `--oos-chunk-size` also increases
throughput (16M would be sensible). Increasing either of these will
use more memory. The default values are high enough to gain most of
the possible performance without using too much memory.
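For example, with the default `--transfers 4`, `--oos-upload-concurrency 10`
and `--oos-chunk-size 5Mi`, multipart uploads can buffer up to
4 * 10 * 5 MiB = 200 MiB on top of rclone's normal memory use.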
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/oracleobjectstorage/oracleobjectstorage.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
#### --oos-provider
Choose your Auth Provider
Properties:
- Config: provider
- Env Var: RCLONE_OOS_PROVIDER
- Type: string
- Default: "env_auth"
- Examples:
- "env_auth"
- automatically pick up the credentials from runtime (env), first one to provide auth wins
- "user_principal_auth"
- use an OCI user and an API key for authentication.
- you'll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
- https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
- "instance_principal_auth"
- use instance principals to authorize an instance to make API calls.
- each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
- https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
- "resource_principal_auth"
- use resource principals to make API calls
- "no_auth"
- no credentials needed, this is typically for reading public buckets
#### --oos-namespace
Object storage namespace
Properties:
- Config: namespace
- Env Var: RCLONE_OOS_NAMESPACE
- Type: string
- Required: true
#### --oos-compartment
Object storage compartment OCID
Properties:
- Config: compartment
- Env Var: RCLONE_OOS_COMPARTMENT
- Provider: !no_auth
- Type: string
- Required: true
#### --oos-region
Object storage Region
Properties:
- Config: region
- Env Var: RCLONE_OOS_REGION
- Type: string
- Required: true
#### --oos-endpoint
Endpoint for Object storage API.
Leave blank to use the default endpoint for the region.
Properties:
- Config: endpoint
- Env Var: RCLONE_OOS_ENDPOINT
- Type: string
- Required: false
#### --oos-config-file
Path to OCI config file
Properties:
- Config: config_file
- Env Var: RCLONE_OOS_CONFIG_FILE
- Provider: user_principal_auth
- Type: string
- Default: "~/.oci/config"
- Examples:
- "~/.oci/config"
- oci configuration file location
#### --oos-config-profile
Profile name inside OCI config file
Properties:
- Config: config_profile
- Env Var: RCLONE_OOS_CONFIG_PROFILE
- Provider: user_principal_auth
- Type: string
- Default: "Default"
- Examples:
- "Default"
- Use the default profile
### Advanced options
Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
#### --oos-upload-cutoff
Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.
Properties:
- Config: upload_cutoff
- Env Var: RCLONE_OOS_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 200Mi
#### --oos-chunk-size
Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.
Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
Properties:
- Config: chunk_size
- Env Var: RCLONE_OOS_CHUNK_SIZE
- Type: SizeSuffix
- Default: 5Mi
#### --oos-upload-concurrency
Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.
Properties:
- Config: upload_concurrency
- Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
- Type: int
- Default: 10
#### --oos-copy-cutoff
Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5 GiB.
Properties:
- Config: copy_cutoff
- Env Var: RCLONE_OOS_COPY_CUTOFF
- Type: SizeSuffix
- Default: 4.656Gi
#### --oos-copy-timeout
Timeout for copy.
Copy is an asynchronous operation; specify a timeout to wait for the copy to succeed
Properties:
- Config: copy_timeout
- Env Var: RCLONE_OOS_COPY_TIMEOUT
- Type: Duration
- Default: 1m0s
#### --oos-disable-checksum
Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.
Properties:
- Config: disable_checksum
- Env Var: RCLONE_OOS_DISABLE_CHECKSUM
- Type: bool
- Default: false
#### --oos-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_OOS_ENCODING
- Type: MultiEncoder
- Default: Slash,InvalidUtf8,Dot
#### --oos-leave-parts-on-error
If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
Properties:
- Config: leave_parts_on_error
- Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
- Type: bool
- Default: false
## Backend commands
Here are the commands specific to the oracleobjectstorage backend.
Run them with
rclone backend COMMAND remote:
The help below will explain what arguments each command takes.
See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
### rename
change the name of an object
rclone backend rename remote: [options] [<arguments>+]
This command can be used to rename an object.
Usage Examples:
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
### list-multipart-uploads
List the unfinished multipart uploads
rclone backend list-multipart-uploads remote: [options] [<arguments>+]
This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket, in which case it lists all buckets, with
a bucket, or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
}
### cleanup
Remove unfinished multipart uploads.
rclone backend cleanup remote: [options] [<arguments>+]
This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use -i/--dry-run with this command to see what it
would do.
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
Options:
- "max-age": Max age of upload to delete
{{< rem autogenerated options stop >}}
@ -385,3 +385,13 @@ backends:
    fastlist: false
    ignore:
      - TestRWFileHandleWriteNoWrite
  - backend: "oracleobjectstorage"
    remote: "TestOracleObjectStorage:"
    fastlist: true
    ignore:
      - TestIntegration/FsMkdir/FsEncoding/control_chars
      - TestIntegration/FsMkdir/FsEncoding/leading_CR
      - TestIntegration/FsMkdir/FsEncoding/leading_LF
      - TestIntegration/FsMkdir/FsEncoding/trailing_CR
      - TestIntegration/FsMkdir/FsEncoding/trailing_LF
      - TestIntegration/FsMkdir/FsEncoding/leading_HT
go.mod
@ -28,6 +28,7 @@ require (
github.com/go-chi/chi/v5 v5.0.7
github.com/google/uuid v1.3.0
github.com/hanwen/go-fuse/v2 v2.1.0
github.com/hirochachacha/go-smb2 v1.1.0
github.com/iguanesolutions/go-systemd/v5 v5.1.0
github.com/jcmturner/gokrb5/v8 v8.4.3
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004
@ -39,6 +40,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1
github.com/ncw/swift/v2 v2.0.1
github.com/oracle/oci-go-sdk/v65 v65.1.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.5
github.com/pmezard/go-difflib v1.0.0
@ -86,6 +88,7 @@ require (
github.com/gdamore/encoding v1.0.0 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
@ -93,7 +96,6 @@ require (
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hirochachacha/go-smb2 v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
@ -115,8 +117,7 @@ require (
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect
github.com/sony/gobreaker v0.5.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
go.sum
@ -227,6 +227,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@ -382,8 +384,6 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff h1:tN6UCYCBFNrPwvKf4RP9cIhGo6GcZ/IQTN8nqD7eCok=
github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE=
github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a h1:s4ryRQyC5HKZh6qkjNAFcvmD7gImK5bZuj/YZkXy1vw=
github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -487,6 +487,8 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/oracle/oci-go-sdk/v65 v65.1.0 h1:CtEPYXdFvv6H+zfYPfTT7DT/V/a5UsULkrj/AnzOtBc=
github.com/oracle/oci-go-sdk/v65 v65.1.0/go.mod h1:oyMrMa1vOzzKTmPN+kqrTR9y9kPA2tU1igN3NUSNTIE=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/getopt v1.1.0/go.mod h1:FxXoW1Re00sQG/+KIkuSqRL/LwQgSkv7uyac+STFsbk=
@ -564,8 +566,6 @@ github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9A
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
@ -574,8 +574,6 @@ github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5J
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@ -590,6 +588,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
@ -604,6 +604,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=