
crypt: Implement RangeOption #1825

Author: Nick Craig-Wood
Date: 2018-01-22 18:31:59 +00:00
Parent: 451cd6d971
Commit: c929de9dc4
3 changed files with 288 additions and 51 deletions
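The change lets a caller request a byte range of the decrypted data and have the crypt backend fetch only the encrypted blocks covering that range from the wrapped remote. A minimal sketch of the calling side, assuming a crypt-backed fs.Object and the option-only Open signature in use at this commit (the helper name is hypothetical):

package example

import (
	"io/ioutil"

	"github.com/ncw/rclone/fs"
)

// readRange is a hypothetical helper: ask a crypt-backed object for the
// plaintext bytes [start, end], inclusive, as fs.RangeOption expresses ranges.
// The crypt backend translates this into a range read of the underlying
// encrypted object covering only the blocks involved.
func readRange(obj fs.Object, start, end int64) ([]byte, error) {
	rc, err := obj.Open(&fs.RangeOption{Start: start, End: end})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return ioutil.ReadAll(rc)
}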

Changed file 1 of 3

@@ -14,6 +14,7 @@ import (
"unicode/utf8"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
@@ -64,10 +65,11 @@ type ReadSeekCloser interface {
io.Reader
io.Seeker
io.Closer
fs.RangeSeeker
}
// OpenAtOffset opens the file handle at the offset given
type OpenAtOffset func(offset int64) (io.ReadCloser, error)
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
@@ -84,7 +86,7 @@ type Cipher interface {
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(open OpenAtOffset, offset int64) (ReadSeekCloser, error)
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
@@ -712,7 +714,8 @@ type decrypter struct {
bufIndex int
bufSize int
err error
open OpenAtOffset
limit int64 // limit of bytes to read, -1 for unlimited
open OpenRangeSeek
}
// newDecrypter creates a new file handle decrypting on the fly
@@ -722,6 +725,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
c: c,
buf: c.getBlock(),
readBuf: c.getBlock(),
limit: -1,
}
// Read file header (magic + nonce)
readBuf := fh.readBuf[:fileHeaderSize]
@@ -743,9 +747,24 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(open OpenAtOffset, offset int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
// Open initially with no seek
rc, err := open(0)
if offset == 0 && limit < 0 {
// If no offset or limit then open whole file
rc, err = open(0, -1)
} else if offset == 0 {
// If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true
} else {
// Otherwise just read the header to start with
rc, err = open(0, int64(fileHeaderSize))
doRangeSeek = true
}
if err != nil {
return nil, err
}
@@ -754,14 +773,17 @@ func (c *cipher) newDecrypterSeek(open OpenAtOffset, offset int64) (fh *decrypte
if err != nil {
return nil, err
}
fh.open = open // will be called by fh.Seek
if offset != 0 {
_, err = fh.Seek(offset, 0)
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
_, err = fh.RangeSeek(offset, 0, limit)
if err != nil {
_ = fh.Close()
return nil, err
}
}
if setLimit {
fh.limit = limit
}
return fh, nil
}
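newDecrypterSeek now distinguishes three cases for the very first open: no offset and no limit (open the whole file), a limit at offset zero (open the header plus exactly the underlying bytes needed), or a non-zero offset (open just the header and let RangeSeek do the rest). A small standalone sketch of that decision, with hypothetical names and an assumed 32-byte file header:

package main

import "fmt"

// initialRange sketches the decision newDecrypterSeek makes about the first
// open. underlyingLimit is what calculateUnderlying would return for the
// requested range; the 32-byte header size is an assumption for illustration.
func initialRange(offset, limit, underlyingLimit int64) (openOffset, openLimit int64, rangeSeekAfter bool) {
	const fileHeaderSize = 32
	switch {
	case offset == 0 && limit < 0:
		return 0, -1, false // whole file in one request
	case offset == 0:
		return 0, fileHeaderSize + underlyingLimit, false // header + needed blocks
	default:
		return 0, fileHeaderSize, true // header only; RangeSeek re-opens or seeks
	}
}

func main() {
	fmt.Println(initialRange(0, -1, -1))        // 0 -1 false
	fmt.Println(initialRange(0, 100, 65552))    // 0 65584 false
	fmt.Println(initialRange(5000, 100, 65552)) // 0 32 true
}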
@@ -814,13 +836,66 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return 0, fh.finish(err)
}
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
toCopy := fh.bufSize - fh.bufIndex
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
if fh.limit == 0 {
return n, fh.finish(io.EOF)
}
}
return n, nil
}
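The Read change caps each copy at the bytes remaining under fh.limit and returns io.EOF once the limit is exhausted, the same accounting io.LimitedReader does, but applied to the decrypted stream. A stripped-down illustration of that pattern (not the rclone type; a negative limit means unlimited, mirroring the decrypter's convention):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// limitedReader copies at most limit bytes from r, then reports io.EOF.
type limitedReader struct {
	r     io.Reader
	limit int64 // -1 for unlimited
}

func (l *limitedReader) Read(p []byte) (int, error) {
	if l.limit == 0 {
		return 0, io.EOF
	}
	if l.limit > 0 && int64(len(p)) > l.limit {
		p = p[:l.limit]
	}
	n, err := l.r.Read(p)
	if l.limit > 0 {
		l.limit -= int64(n)
		if l.limit == 0 && err == nil {
			err = io.EOF // limit exhausted: report EOF with the final bytes
		}
	}
	return n, err
}

func main() {
	r := &limitedReader{r: strings.NewReader("hello world"), limit: 5}
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%q\n", b) // "hello"
}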
// Seek as per io.Seeker
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can
// be incremented.
func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit, discard, blocks int64) {
// blocks we need to seek, plus bytes we need to discard
blocks, discard = offset/blockDataSize, offset%blockDataSize
// Offset in underlying stream we need to seek
underlyingOffset = int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
// work out how many blocks we need to read
underlyingLimit = int64(-1)
if limit >= 0 {
// bytes to read beyond the first block
bytesToRead := limit - (blockDataSize - discard)
// Read the first block
blocksToRead := int64(1)
if bytesToRead > 0 {
// Blocks that need to be read plus left over blocks
extraBlocksToRead, endBytes := bytesToRead/blockDataSize, bytesToRead%blockDataSize
if endBytes != 0 {
// If left over bytes must read another block
extraBlocksToRead++
}
blocksToRead += extraBlocksToRead
}
// Must read a whole number of blocks
underlyingLimit = blocksToRead * (blockHeaderSize + blockDataSize)
}
return
}
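calculateUnderlying is the heart of the mapping: a plaintext (offset, limit) becomes a whole number of encrypted blocks, plus a count of bytes to discard from the first block and the block count needed to advance the nonce. A self-contained copy of the arithmetic with assumed geometry constants (the real values live in the crypt package: a fixed file header, a per-block secretbox header, 64 KiB of data per block), plus one worked case:

package main

import "fmt"

// Assumed crypt geometry, for illustration only.
const (
	fileHeaderSize  = 32        // magic + nonce (assumed)
	blockHeaderSize = 16        // per-block authentication overhead (assumed)
	blockDataSize   = 64 * 1024 // plaintext bytes per block (assumed)
	blockSize       = blockHeaderSize + blockDataSize
)

// underlying mirrors calculateUnderlying from the diff.
func underlying(offset, limit int64) (uOff, uLim, discard, blocks int64) {
	blocks, discard = offset/blockDataSize, offset%blockDataSize
	uOff = fileHeaderSize + blocks*blockSize
	uLim = -1
	if limit >= 0 {
		blocksToRead := int64(1) // always read the block the offset lands in
		if remaining := limit - (blockDataSize - discard); remaining > 0 {
			blocksToRead += remaining / blockDataSize
			if remaining%blockDataSize != 0 {
				blocksToRead++ // a partial trailing block still costs a whole block
			}
		}
		uLim = blocksToRead * blockSize
	}
	return
}

func main() {
	// 100 plaintext bytes starting at 65536: skip the header plus one whole
	// encrypted block, then read exactly one encrypted block.
	fmt.Println(underlying(65536, 100)) // 65584 65552 0 1
}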
// RangeSeek behaves like a call to Seek(offset int64, whence
// int) with the output wrapped in an io.LimitedReader
// limiting the total length to limit.
//
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock()
defer fh.mu.Unlock()
@@ -838,20 +913,16 @@ func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return 0, fh.err
}
// blocks we need to seek, plus bytes we need to discard
blocks, discard := offset/blockDataSize, offset%blockDataSize
// Offset in underlying stream we need to seek
underlyingOffset := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(offset, limit)
// Move the nonce on the correct number of blocks from the start
fh.nonce = fh.initialNonce
fh.nonce.add(uint64(blocks))
// Can we seek underlying stream directly?
if do, ok := fh.rc.(io.Seeker); ok {
if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly
_, err := do.Seek(underlyingOffset, 0)
_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil {
return 0, fh.finish(err)
}
@@ -861,9 +932,9 @@ func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
fh.rc = nil
// Re-open the underlying object with the offset given
rc, err := fh.open(underlyingOffset)
rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset"))
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
}
// Set the file handle
@@ -882,9 +953,17 @@ func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
}
fh.bufIndex = int(discard)
// Set the limit
fh.limit = limit
return offset, nil
}
// Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(offset, whence, -1)
}
// finish sets the final error and tidies up
func (fh *decrypter) finish(err error) error {
if fh.err != nil {
@@ -956,8 +1035,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(open OpenAtOffset, offset int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(open, offset)
func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil {
return nil, err
}
@@ -994,7 +1073,9 @@ func (c *cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Reader = (*encrypter)(nil)
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)
_ io.Reader = (*encrypter)(nil)
)
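Everything the cipher layer needs from the backend is the OpenRangeSeek callback: given an (offset, limit) in the encrypted stream, return a ReadCloser positioned at offset that yields at most limit bytes, with limit < 0 meaning "to the end". An in-memory sketch of that contract over a byte slice, much like the open callback used by the tests below:

package example

import (
	"bytes"
	"io"
	"io/ioutil"
)

// openRangeSeek returns a callback satisfying the OpenRangeSeek contract for
// ciphertext held in memory: a reader over at most limit bytes starting at
// offset, with limit < 0 meaning "read to the end".
func openRangeSeek(ciphertext []byte) func(offset, limit int64) (io.ReadCloser, error) {
	return func(offset, limit int64) (io.ReadCloser, error) {
		end := int64(len(ciphertext))
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return ioutil.NopCloser(bytes.NewReader(ciphertext[offset:end])), nil
	}
}

The decrypter uses this callback for the initial open (whole file, header plus needed blocks, or header only) and again whenever a RangeSeek cannot be satisfied from the current handle; if the handle itself implements fs.RangeSeeker it is seeked in place instead of being reopened.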

Changed file 2 of 3

@@ -935,7 +935,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(16), n)
}
func TestNewDecrypterSeek(t *testing.T) {
func TestNewDecrypterSeekLimit(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
@@ -956,42 +956,188 @@ func TestNewDecrypterSeek(t *testing.T) {
127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049,
4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, 16385, 32767, 32768, 32769,
65535, 65536, 65537, 131071, 131072, 131073, dataSize - 1, dataSize}
limits := []int{-1, 0, 1, 65535, 65536, 65537, 131071, 131072, 131073}
// Open stream with a seek of underlyingOffset
open := func(underlyingOffset int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):])), nil
var reader io.ReadCloser
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
end = int(underlyingOffset + underlyingLimit)
if end > len(ciphertext) {
end = len(ciphertext)
}
}
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
inBlock := make([]byte, 1024)
inBlock := make([]byte, dataSize)
// Check the seek worked by reading a block and checking it
// against what it should be
check := func(rc ReadSeekCloser, offset int) {
check := func(rc io.Reader, offset, limit int) {
n, err := io.ReadFull(rc, inBlock)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
require.NoError(t, err)
}
seekedDecrypted := inBlock[:n]
require.Equal(t, plaintext[offset:offset+n], seekedDecrypted)
what := fmt.Sprintf("offset = %d, limit = %d", offset, limit)
if limit >= 0 {
assert.Equal(t, limit, n, what)
}
require.Equal(t, plaintext[offset:offset+n], seekedDecrypted, what)
// We should have completely emptied the reader at this point
n, err = reader.Read(inBlock)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
}
// Now try decoding it with a open/seek
for _, offset := range trials {
rc, err := c.DecryptDataSeek(open, int64(offset))
assert.NoError(t, err)
for _, limit := range limits {
if offset+limit > len(plaintext) {
continue
}
rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
assert.NoError(t, err)
check(rc, offset)
check(rc, offset, limit)
}
}
// Now try decoding it with a single open and lots of seeks
rc, err := c.DecryptDataSeek(open, 0)
// Try decoding it with a single open and lots of seeks
fh, err := c.DecryptDataSeek(open, 0, -1)
assert.NoError(t, err)
for _, offset := range trials {
_, err := rc.Seek(int64(offset), 0)
assert.NoError(t, err)
for _, limit := range limits {
if offset+limit > len(plaintext) {
continue
}
_, err := fh.RangeSeek(int64(offset), 0, int64(limit))
assert.NoError(t, err)
check(rc, offset)
check(fh, offset, limit)
}
}
// Do some checks on the open callback
for _, test := range []struct {
offset, limit int64
wantOffset, wantLimit int64
}{
// unlimited
{0, -1, int64(fileHeaderSize), -1},
{1, -1, int64(fileHeaderSize), -1},
{blockDataSize - 1, -1, int64(fileHeaderSize), -1},
{blockDataSize, -1, int64(fileHeaderSize) + blockSize, -1},
{blockDataSize + 1, -1, int64(fileHeaderSize) + blockSize, -1},
// limit=1
{0, 1, int64(fileHeaderSize), blockSize},
{1, 1, int64(fileHeaderSize), blockSize},
{blockDataSize - 1, 1, int64(fileHeaderSize), blockSize},
{blockDataSize, 1, int64(fileHeaderSize) + blockSize, blockSize},
{blockDataSize + 1, 1, int64(fileHeaderSize) + blockSize, blockSize},
// limit=100
{0, 100, int64(fileHeaderSize), blockSize},
{1, 100, int64(fileHeaderSize), blockSize},
{blockDataSize - 1, 100, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize, 100, int64(fileHeaderSize) + blockSize, blockSize},
{blockDataSize + 1, 100, int64(fileHeaderSize) + blockSize, blockSize},
// limit=blockDataSize-1
{0, blockDataSize - 1, int64(fileHeaderSize), blockSize},
{1, blockDataSize - 1, int64(fileHeaderSize), blockSize},
{blockDataSize - 1, blockDataSize - 1, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize},
{blockDataSize + 1, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize},
// limit=blockDataSize
{0, blockDataSize, int64(fileHeaderSize), blockSize},
{1, blockDataSize, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize - 1, blockDataSize, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize, blockDataSize, int64(fileHeaderSize) + blockSize, blockSize},
{blockDataSize + 1, blockDataSize, int64(fileHeaderSize) + blockSize, 2 * blockSize},
// limit=blockDataSize+1
{0, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
{1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize - 1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
{blockDataSize, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize},
{blockDataSize + 1, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize},
} {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
callCount := 0
testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
switch callCount {
case 0:
assert.Equal(t, int64(0), underlyingOffset, what)
assert.Equal(t, int64(-1), underlyingLimit, what)
case 1:
assert.Equal(t, test.wantOffset, underlyingOffset, what)
assert.Equal(t, test.wantLimit, underlyingLimit, what)
default:
t.Errorf("Too many calls %d for %s", callCount+1, what)
}
callCount++
return open(underlyingOffset, underlyingLimit)
}
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(test.offset, 0, test.limit)
assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset)
}
}
func TestDecrypterCalculateUnderlying(t *testing.T) {
for _, test := range []struct {
offset, limit int64
wantOffset, wantLimit int64
wantDiscard, wantBlocks int64
}{
// unlimited
{0, -1, int64(fileHeaderSize), -1, 0, 0},
{1, -1, int64(fileHeaderSize), -1, 1, 0},
{blockDataSize - 1, -1, int64(fileHeaderSize), -1, blockDataSize - 1, 0},
{blockDataSize, -1, int64(fileHeaderSize) + blockSize, -1, 0, 1},
{blockDataSize + 1, -1, int64(fileHeaderSize) + blockSize, -1, 1, 1},
// limit=1
{0, 1, int64(fileHeaderSize), blockSize, 0, 0},
{1, 1, int64(fileHeaderSize), blockSize, 1, 0},
{blockDataSize - 1, 1, int64(fileHeaderSize), blockSize, blockDataSize - 1, 0},
{blockDataSize, 1, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
{blockDataSize + 1, 1, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
// limit=100
{0, 100, int64(fileHeaderSize), blockSize, 0, 0},
{1, 100, int64(fileHeaderSize), blockSize, 1, 0},
{blockDataSize - 1, 100, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
{blockDataSize, 100, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
{blockDataSize + 1, 100, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
// limit=blockDataSize-1
{0, blockDataSize - 1, int64(fileHeaderSize), blockSize, 0, 0},
{1, blockDataSize - 1, int64(fileHeaderSize), blockSize, 1, 0},
{blockDataSize - 1, blockDataSize - 1, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
{blockDataSize, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
{blockDataSize + 1, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
// limit=blockDataSize
{0, blockDataSize, int64(fileHeaderSize), blockSize, 0, 0},
{1, blockDataSize, int64(fileHeaderSize), 2 * blockSize, 1, 0},
{blockDataSize - 1, blockDataSize, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
{blockDataSize, blockDataSize, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
{blockDataSize + 1, blockDataSize, int64(fileHeaderSize) + blockSize, 2 * blockSize, 1, 1},
// limit=blockDataSize+1
{0, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, 0, 0},
{1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, 1, 0},
{blockDataSize - 1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
{blockDataSize, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize, 0, 1},
{blockDataSize + 1, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize, 1, 1},
} {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(test.offset, test.limit)
assert.Equal(t, test.wantOffset, underlyingOffset, what)
assert.Equal(t, test.wantLimit, underlyingLimit, what)
assert.Equal(t, test.wantDiscard, discard, what)
assert.Equal(t, test.wantBlocks, blocks, what)
}
}

Changed file 3 of 3

@@ -573,29 +573,39 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset int64
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
// pass on Options to underlying open if appropriate
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset int64) (io.ReadCloser, error) {
if underlyingOffset == 0 {
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open()
return o.Object.Open(openOptions...)
}
// Open stream with a seek of underlyingOffset
return o.Object.Open(&fs.SeekOption{Offset: underlyingOffset})
}, offset)
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
if underlyingLimit >= 0 {
end = underlyingOffset + underlyingLimit - 1
if end >= o.Object.Size() {
end = -1
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
}
return rc, err
return rc, nil
}
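The option handling converts between two conventions: fs.RangeOption carries an inclusive (Start, End) pair, while the cipher layer works with (offset, limit). Decode is relied on to turn a bounded range into limit = End - Start + 1, and the reverse mapping used when re-opening the underlying object subtracts one again, dropping the upper bound entirely when it would run past the object's size. A simplified sketch of the two conversions for the common case of non-negative bounds; the real fs.RangeOption.Decode also handles suffix ranges, so treat this as assumed semantics rather than the rclone implementation:

package main

import "fmt"

// decodeRange: inclusive (start, end) -> (offset, limit); end < 0 means
// "to the end of the object", giving an unlimited read (limit -1).
func decodeRange(start, end int64) (offset, limit int64) {
	offset, limit = start, -1
	if end >= 0 {
		limit = end - start + 1 // End is inclusive
	}
	return
}

// encodeRange: (offset, limit) -> inclusive (start, end), as used when the
// crypt backend re-opens the wrapped object with a RangeOption.
func encodeRange(offset, limit int64) (start, end int64) {
	start, end = offset, -1
	if limit >= 0 {
		end = offset + limit - 1
	}
	return
}

func main() {
	fmt.Println(decodeRange(1000, 1999))   // 1000 1000
	fmt.Println(encodeRange(65584, 65552)) // 65584 131135
}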
// Update in to the object with the modTime given of the given size