Mirror of https://github.com/rclone/rclone.git
Synced 2026-02-02 09:43:36 +00:00

Compare commits: pr-3782-un ... fix-ssh-ds (235 commits)
The 235 commits in this comparison (abbreviated SHA1s):

c4700f4bf1, e4f1e19127, 4a1b644bfb, 8c9c86c3d6, 8a58e0235d, 52b7337d28, 995cd0dc32, 5eb558e058,
33d9310c49, aba89e2737, d685e7b4b5, 9e4b68a364, 044a3b3920, d342f9f942, e91b509578, 8ddb3fbb2e,
b91e01fd22, 177195aeeb, cb5979a468, 32507774de, 0ce662faad, 03b629064a, 962fbc8257, bd4b91bd57,
10a6a92e52, 54b16bd054, f21e97001b, 1f005a82ad, bb65974e2f, bc0f487369, e103c4c26a, 79d29bb41e,
c80b6d96dd, 97f6f8fe19, 94920d39ae, 9403bd2990, e098924e61, 437f9e2cef, 395f259978, b78adc9f03,
b7c21310b4, c754e89906, 62cfe3f384, afde340c9e, d0ad83de4b, d119bfd934, bdc91eda0f, ef9e6794c2,
4362ca7bb9, dcf945ed58, a86196a156, 856c2b565f, 1a8c5708c5, 14cab0fff0, 69888bf966, d260238f99,
6ca7198f57, cfcdc85b26, 7f8d74e903, f2b1fedc4f, b03cad3cf6, 70db13e6e8, 4c98360356, 42f9f7fb5d,
ca1856724c, b52a39a84e, a97261c54f, 0f5579c0ba, 2f5a2d3c48, ba7f7c8319, 77fb3c2511, 74d9dabdff,
54edf38d0e, 22f06590f7, 3b4c24af4e, f37af9afec, a3f0992a22, f555873f18, 1c8eab81a5, cbc5af329f,
90d738b561, d80fdad6da, e2916f3a55, 1aa1a2c174, 195d152785, 1f61027f51, 37a53570d4, ee7219aa20,
b1d8da484b, 4e869e03f7, 52c9647b06, 7238ae18f9, 551a829eba, 8e91f83174, 7f776c64f0, 8bf6ab2c52,
75fc3fe389, c4572ebc91, e56976839a, 0c0ed2fe04, ab6ed256e5, 7c98ecd3ab, f6346a4d29, b502a74cff,
8e9c25063a, 1dced3b3c4, 087bf1d584, e051a34fc1, f5455d865b, b705ead3fd, c390fc8100, 14f6ce1e77,
385542e2f9, fc946d0c44, 854c84d0ca, 90bd0eb44c, 3130f870bb, 51b617f601, 011ca244b2, 9ea1361044,
776966e22c, 01cb256b84, 0b0163dde2, 38123c70eb, 5cb7229a16, 9bf3d3da4c, 93caa459e3, f8039deb7c,
86eaf43b00, 8176202e6d, 1b74879b8b, 39319b4858, 4af5c9aed7, 8a3c4c6a7b, d22e6f5a96, 1648c1a0f3,
44b1a591a8, bbb6f94377, 3f654dac37, eed9c5738d, fd39cbc193, b25f5eb0d1, 0961763082, 49e5299a95,
07908f3f54, fdada79ebf, 7f15cc9556, cd3c699f28, 36d2c46bcf, 1f50b70919, 19db0df639, 238f26cc90,
268fcbb973, 1e4589db18, 31a1cc46b7, 9d4b3580a5, b07bef2a6b, 705e60d0ad, 4c258787b5, 58ea15078f,
756d47fb50, 53874bd8ee, e2bf91452a, 9eb17e4ade, 2c4aadb588, 424554bc85, 12a208a880, 6893ce0bbf,
399cf18013, 64b5105edd, 2c2f4a6a05, da41db4712, 9f3449d944, ec8a884787, fc663d98d1, 08c2cb784f,
3911a49256, 2a62471e4c, 815ae7df45, ff0a299bfb, 5fa6a28f70, 9a5178be7a, 66e08e0cc8, b5f78cd7b4,
ef99ca68aa, a5c2f2c138, b2c9ef23fa, 5f9be3dd05, b5f1bebc52, ad9c7ff7ed, 1af9fcbbfa, 6765303de4,
304ee97944, d91a547d59, 7d9ca3998e, 9aa32bc269, d9c8c47e02, 243a868a5b, 6c351c15f8, 45b63e2d45,
32df634cb6, e569977c06, b49ab9f9c7, 78a9e7440a, 93f5125f51, 410e17a2ec, 23f7943645, 1108895180,
158870bcdb, 36717c7d98, 1d3987bbbd, 472d4799d1, 84caf1e158, 72eba4dbb6, 0f20f23651, 47e2d5c415,
1e9b8e043a, eb0fc21533, a6a2eec392, c227a90b52, 1e3d899db8, 84369286df, 4c82b1f3c6, f94257115f,
77e94be280, 37d5e75a56, dc06973796, b03462ab04, d4e87a841d, 6d0063d685, 6fdd7149c1, fdb07f2f89,
a433698b00, f14871caf7, 132ce94139
Dockerfile:

@@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/rclone/rclone/
 RUN make quicktest
 RUN \
-    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
+    CGO_ENABLED=0 \
     make
 RUN ./rclone version
MAINTAINERS.md:

@@ -2,17 +2,20 @@

 Current active maintainers of rclone are:

-| Name             | GitHub ID   | Specific Responsibilities    |
-| :--------------- | :---------- | :-------------------------- |
-| Nick Craig-Wood  | @ncw        | overall project health       |
-| Stefan Breunig   | @breunigs   |                              |
-| Ishuah Kariuki   | @ishuah     |                              |
-| Remus Bunduc     | @remusb     | cache backend                |
-| Fabian Möller    | @B4dM4n     |                              |
-| Alex Chen        | @Cnly       | onedrive backend             |
-| Sandeep Ummadi   | @sandeepkru | azureblob backend            |
-| Sebastian Bünger | @buengese   | jottacloud & yandex backends |
-| Ivan Andreev     | @ivandeex   | chunker & mailru backends    |
+| Name             | GitHub ID         | Specific Responsibilities    |
+| :--------------- | :---------------- | :-------------------------- |
+| Nick Craig-Wood  | @ncw              | overall project health       |
+| Stefan Breunig   | @breunigs         |                              |
+| Ishuah Kariuki   | @ishuah           |                              |
+| Remus Bunduc     | @remusb           | cache backend                |
+| Fabian Möller    | @B4dM4n           |                              |
+| Alex Chen        | @Cnly             | onedrive backend             |
+| Sandeep Ummadi   | @sandeepkru       | azureblob backend            |
+| Sebastian Bünger | @buengese         | jottacloud & yandex backends |
+| Ivan Andreev     | @ivandeex         | chunker & mailru backends    |
+| Max Sum          | @Max-Sum          | union backend                |
+| Fred             | @creativeprojects | seafile backend              |
+| Caleb Case       | @calebcase        | tardigrade backend           |

 **This is a work in progress Draft**
README.md:

@@ -59,9 +59,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
+  * Seafile [:page_facing_up:](https://rclone.org/seafile/)
   * SFTP [:page_facing_up:](https://rclone.org/sftp/)
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
   * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
+  * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
   * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
   * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
   * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
backend/all/all.go:

@@ -31,10 +31,12 @@ import (
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
 	_ "github.com/rclone/rclone/backend/s3"
+	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/tardigrade"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/webdav"
 	_ "github.com/rclone/rclone/backend/yandex"
backend/azureblob/azureblob.go:

@@ -9,7 +9,6 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/base64"
-	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"io"
@@ -35,6 +34,9 @@ import (
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/pool"
+	"github.com/rclone/rclone/lib/readers"
+	"golang.org/x/sync/errgroup"
 )

 const (
@@ -59,6 +61,8 @@ const (
 	emulatorAccount      = "devstoreaccount1"
 	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
+	memoryPoolFlushTime  = fs.Duration(time.Minute) // flush the cached buffers after this long
+	memoryPoolUseMmap    = false
 )

 // Register with Fs
@@ -125,6 +129,28 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
 operations from remote will not be allowed. User should first restore by
 tiering blob to "Hot" or "Cool".`,
 			Advanced: true,
+		}, {
+			Name: "disable_checksum",
+			Help: `Don't store MD5 checksum with object metadata.
+
+Normally rclone will calculate the MD5 checksum of the input before
+uploading it so it can add it to metadata on the object. This is great
+for data integrity checking but can cause long delays for large files
+to start uploading.`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name:     "memory_pool_flush_time",
+			Default:  memoryPoolFlushTime,
+			Advanced: true,
+			Help: `How often internal memory buffer pools will be flushed.
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
+		}, {
+			Name:     "memory_pool_use_mmap",
+			Default:  memoryPoolUseMmap,
+			Advanced: true,
+			Help:     `Whether to use mmap buffers in internal memory pool.`,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -141,16 +167,19 @@ tiering blob to "Hot" or "Cool".`,

 // Options defines the configuration for this backend
 type Options struct {
-	Account       string               `config:"account"`
-	Key           string               `config:"key"`
-	Endpoint      string               `config:"endpoint"`
-	SASURL        string               `config:"sas_url"`
-	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
-	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
-	ListChunkSize uint                 `config:"list_chunk"`
-	AccessTier    string               `config:"access_tier"`
-	UseEmulator   bool                 `config:"use_emulator"`
-	Enc           encoder.MultiEncoder `config:"encoding"`
+	Account             string               `config:"account"`
+	Key                 string               `config:"key"`
+	Endpoint            string               `config:"endpoint"`
+	SASURL              string               `config:"sas_url"`
+	UploadCutoff        fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize           fs.SizeSuffix        `config:"chunk_size"`
+	ListChunkSize       uint                 `config:"list_chunk"`
+	AccessTier          string               `config:"access_tier"`
+	UseEmulator         bool                 `config:"use_emulator"`
+	DisableCheckSum     bool                 `config:"disable_checksum"`
+	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
+	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
+	Enc                 encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote azure server
@@ -169,6 +198,7 @@ type Fs struct {
 	cache       *bucket.Cache         // cache for container creation status
 	pacer       *fs.Pacer             // To pace and retry the API calls
 	uploadToken *pacer.TokenDispenser // control concurrency
+	pool        *pool.Pool            // memory pool
 }

 // Object describes a azure object
@@ -257,6 +287,12 @@ var retryErrorCodes = []int{
 func (f *Fs) shouldRetry(err error) (bool, error) {
 	// FIXME interpret special errors - more to do here
 	if storageErr, ok := err.(azblob.StorageError); ok {
+		switch storageErr.ServiceCode() {
+		case "InvalidBlobOrBlock":
+			// These errors happen sometimes in multipart uploads
+			// because of block concurrency issues
+			return true, err
+		}
 		statusCode := storageErr.Response().StatusCode
 		for _, e := range retryErrorCodes {
 			if statusCode == e {
@@ -382,6 +418,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		client:      fshttp.NewClient(fs.Config),
 		cache:       bucket.NewCache(),
 		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
+		pool: pool.New(
+			time.Duration(opt.MemoryPoolFlushTime),
+			int(opt.ChunkSize),
+			fs.Config.Transfers,
+			opt.MemoryPoolUseMmap,
+		),
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
@@ -816,6 +858,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	return fs, fs.Update(ctx, in, src, options...)
 }

+// PutStream uploads to the remote path with the modTime given of indeterminate size
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
+}
+
 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	container, _ := f.split(dir)
@@ -825,6 +872,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // makeContainer creates the container if it doesn't exist
 func (f *Fs) makeContainer(ctx context.Context, container string) error {
 	return f.cache.Create(container, func() error {
+		// If this is a SAS URL limited to a container then assume it is already created
+		if f.isLimited {
+			return nil
+		}
 		// now try to create the container
 		return f.pacer.Call(func() (bool, error) {
 			_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
@@ -978,6 +1029,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return f.NewObject(ctx, remote)
 }

+func (f *Fs) getMemoryPool(size int64) *pool.Pool {
+	if size == int64(f.opt.ChunkSize) {
+		return f.pool
+	}
+
+	return pool.New(
+		time.Duration(f.opt.MemoryPoolFlushTime),
+		int(size),
+		fs.Config.Transfers,
+		f.opt.MemoryPoolUseMmap,
+	)
+}
+
 // ------------------------------------------------------------

 // Fs returns the parent Fs
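getMemoryPool returns the Fs-wide pool when the requested size matches the configured chunk size and only builds a throwaway pool for odd sizes, so steady-state uploads keep reusing the same buffers. A self-contained sketch of the underlying borrow/return idea follows; this is a toy stand-in built on sync.Pool, not rclone's lib/pool, whose internals are not shown in this diff:

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool is a toy version of the buffer-pool idea: fixed-size byte
// buffers are borrowed with Get and returned with Put, so concurrent
// multipart uploads stop allocating a fresh chunk per part.
type bufPool struct {
	size int
	pool sync.Pool
}

func newBufPool(size int) *bufPool {
	p := &bufPool{size: size}
	p.pool.New = func() interface{} { return make([]byte, size) }
	return p
}

func (p *bufPool) Get() []byte  { return p.pool.Get().([]byte) }
func (p *bufPool) Put(b []byte) { p.pool.Put(b[:p.size]) } // restore full length before reuse

func main() {
	pool := newBufPool(4 << 20) // 4 MB, the default chunk size mentioned in the diff
	buf := pool.Get()
	fmt.Println("borrowed", len(buf), "bytes")
	pool.Put(buf)
}
```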
@@ -1221,103 +1285,116 @@ type readSeeker struct {
 	io.Seeker
 }

+// increment the slice passed in as LSB binary
+func increment(xs []byte) {
+	for i, digit := range xs {
+		newDigit := digit + 1
+		xs[i] = newDigit
+		if newDigit >= digit {
+			// exit if no carry
+			break
+		}
+	}
+}
+
+var warnStreamUpload sync.Once
+
 // uploadMultipart uploads a file using multipart upload
 //
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
-func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
 	// Calculate correct chunkSize
 	chunkSize := int64(o.fs.opt.ChunkSize)
-	var totalParts int64
-	for {
-		// Calculate number of parts
-		var remainder int64
-		totalParts, remainder = size/chunkSize, size%chunkSize
-		if remainder != 0 {
-			totalParts++
-		}
-		if totalParts < maxTotalParts {
-			break
-		}
-		// Double chunk size if the number of parts is too big
-		chunkSize *= 2
-		if chunkSize > int64(maxChunkSize) {
-			return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
-		}
-	}
+	totalParts := -1
+
+	// Note that the max size of file is 4.75 TB (100 MB X 50,000
+	// blocks) and this is bigger than the max uncommitted block
+	// size (9.52 TB) so we do not need to part commit block lists
+	// or garbage collect uncommitted blocks.
+	//
+	// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
+
+	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
+	// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
+	// 195GB which seems like a not too unreasonable limit.
+	if size == -1 {
+		warnStreamUpload.Do(func() {
+			fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
+				o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
+		})
+	} else {
+		// Adjust partSize until the number of parts is small enough.
+		if size/chunkSize >= maxTotalParts {
+			// Calculate partition size rounded up to the nearest MB
+			chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
+		}
+		totalParts = int(size / chunkSize)
+		if size%chunkSize != 0 {
+			totalParts++
+		}
+	}
+
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))

-	// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
-	// Utilities are cloned from above example
-	// These helper functions convert a binary block ID to a base-64 string and vice versa
-	// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
-	blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
-	// These helper functions convert an int block ID to a base-64 string and vice versa
-	blockIDIntToBase64 := func(blockID uint64) string {
-		binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
-		binary.LittleEndian.PutUint64(binaryBlockID, blockID)
-		return blockIDBinaryToBase64(binaryBlockID)
-	}
-
-	// block ID variables
-	var (
-		rawID   uint64
-		blockID = "" // id in base64 encoded form
-		blocks  []string
-	)
-
-	// increment the blockID
-	nextID := func() {
-		rawID++
-		blockID = blockIDIntToBase64(rawID)
-		blocks = append(blocks, blockID)
-	}
-
-	// Get BlockBlobURL, we will use default pipeline here
-	blockBlobURL := blob.ToBlockBlobURL()
-	ctx := context.Background()
-	ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
-
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
 	in, wrap := accounting.UnWrap(in)

 	// Upload the chunks
-	remaining := size
-	position := int64(0)
-	errs := make(chan error, 1)
-	var wg sync.WaitGroup
-outer:
-	for part := 0; part < int(totalParts); part++ {
-		// Check any errors
-		select {
-		case err = <-errs:
-			break outer
-		default:
-		}
-
-		reqSize := remaining
-		if reqSize >= chunkSize {
-			reqSize = chunkSize
-		}
-
-		// Make a block of memory
-		buf := make([]byte, reqSize)
-
-		// Read the chunk
-		_, err = io.ReadFull(in, buf)
-		if err != nil {
-			err = errors.Wrap(err, "multipart upload failed to read source")
-			break outer
-		}
-
-		// Transfer the chunk
-		nextID()
-		wg.Add(1)
-		o.fs.uploadToken.Get()
-		go func(part int, position int64, blockID string) {
-			defer wg.Done()
-			defer o.fs.uploadToken.Put()
-			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
+	var (
+		g, gCtx       = errgroup.WithContext(ctx)
+		remaining     = size                           // remaining size in file for logging only, -1 if size < 0
+		position      = int64(0)                       // position in file
+		memPool       = o.fs.getMemoryPool(chunkSize)  // pool to get memory from
+		finished      = false                          // set when we have read EOF
+		blocks        []string                         // list of blocks for finalize
+		blockBlobURL  = blob.ToBlockBlobURL()          // Get BlockBlobURL, we will use default pipeline here
+		ac            = azblob.LeaseAccessConditions{} // Use default lease access conditions
+		binaryBlockID = make([]byte, 8)                // block counter as LSB first 8 bytes
+	)
+	for part := 0; !finished; part++ {
+		// Get a block of memory from the pool and a token which limits concurrency
+		o.fs.uploadToken.Get()
+		buf := memPool.Get()
+
+		free := func() {
+			memPool.Put(buf)       // return the buf
+			o.fs.uploadToken.Put() // return the token
+		}
+
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in uploading all the other parts.
+		if gCtx.Err() != nil {
+			free()
+			break
+		}
+
+		// Read the chunk
+		n, err := readers.ReadFill(in, buf) // this can never return 0, nil
+		if err == io.EOF {
+			if n == 0 { // end if no data
+				free()
+				break
+			}
+			finished = true
+		} else if err != nil {
+			free()
+			return errors.Wrap(err, "multipart upload failed to read source")
+		}
+		buf = buf[:n]
+
+		// increment the blockID and save the blocks for finalize
+		increment(binaryBlockID)
+		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
+		blocks = append(blocks, blockID)

 		// Transfer the chunk
+		fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
+		g.Go(func() (err error) {
+			defer free()

 			// Upload the block, with MD5 for check
 			md5sum := md5.Sum(buf)
@@ -1329,28 +1406,19 @@ outer:
 			_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
 			return o.fs.shouldRetry(err)
 		})
-
 		if err != nil {
-			err = errors.Wrap(err, "multipart upload failed to upload part")
-			select {
-			case errs <- err:
-			default:
-			}
-			return
+			return errors.Wrap(err, "multipart upload failed to upload part")
 		}
-		}(part, position, blockID)
+		return nil
+	})

 		// ready for next block
-		remaining -= chunkSize
+		if size >= 0 {
+			remaining -= chunkSize
+		}
 		position += chunkSize
 	}
-	wg.Wait()
-	if err == nil {
-		select {
-		case err = <-errs:
-		default:
-		}
-	}
+	err = g.Wait()
 	if err != nil {
 		return err
 	}
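The rewritten loop swaps the WaitGroup-plus-error-channel machinery for errgroup: the first failing part cancels gCtx, and the producer loop checks gCtx.Err() to stop reading further chunks. A runnable miniature of just that pattern (toy part count and a channel-based token limiter; the real code uses pacer.TokenDispenser and uploads blocks to Azure):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	tokens := make(chan struct{}, 4) // concurrency limiter, like pacer.TokenDispenser

	for part := 0; part < 10; part++ {
		tokens <- struct{}{} // acquire a token before producing the next chunk
		if gCtx.Err() != nil {
			<-tokens
			break // a previous part failed; no point producing more
		}
		part := part
		g.Go(func() error {
			defer func() { <-tokens }() // return the token when this part finishes
			fmt.Println("uploading part", part)
			return nil // a real uploader would return the StageBlock error here
		})
	}
	if err := g.Wait(); err != nil { // first error wins, gCtx is cancelled
		fmt.Println("upload failed:", err)
	}
}
```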
@@ -1389,12 +1457,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
 	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
 	// in order to validate its integrity during transport
-	if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
-		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-		if err == nil {
-			httpHeaders.ContentMD5 = sourceMD5bytes
-		} else {
-			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+	if !o.fs.opt.DisableCheckSum {
+		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
+			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+			if err == nil {
+				httpHeaders.ContentMD5 = sourceMD5bytes
+			} else {
+				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+			}
 		}
 	}

@@ -1408,7 +1478,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// is merged the SDK can't upload a single blob of exactly the chunk
 	// size, so upload with a multpart upload to work around.
 	// See: https://github.com/rclone/rclone/issues/2653
-	multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
+	multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
 	if size == int64(o.fs.opt.ChunkSize) {
 		multipartUpload = true
 		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
@@ -1418,7 +1488,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		if multipartUpload {
 			// If a large file upload in chunks
-			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
+			err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
 		} else {
 			// Write a small blob in one transaction
 			blockBlobURL := blob.ToBlockBlobURL()
@@ -1503,12 +1573,13 @@ func (o *Object) GetTier() string {

 // Check the interfaces are satisfied
 var (
-	_ fs.Fs        = &Fs{}
-	_ fs.Copier    = &Fs{}
-	_ fs.Purger    = &Fs{}
-	_ fs.ListRer   = &Fs{}
-	_ fs.Object    = &Object{}
-	_ fs.MimeTyper = &Object{}
-	_ fs.GetTierer = &Object{}
-	_ fs.SetTierer = &Object{}
+	_ fs.Fs          = &Fs{}
+	_ fs.Copier      = &Fs{}
+	_ fs.PutStreamer = &Fs{}
+	_ fs.Purger      = &Fs{}
+	_ fs.ListRer     = &Fs{}
+	_ fs.Object      = &Object{}
+	_ fs.MimeTyper   = &Object{}
+	_ fs.GetTierer   = &Object{}
+	_ fs.SetTierer   = &Object{}
 )
backend/azureblob/azureblob_internal_test.go:

@@ -16,3 +16,20 @@ func (f *Fs) InternalTest(t *testing.T) {
 	enabled = f.Features().GetTier
 	assert.True(t, enabled)
 }
+
+func TestIncrement(t *testing.T) {
+	for _, test := range []struct {
+		in   []byte
+		want []byte
+	}{
+		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+	} {
+		increment(test.in)
+		assert.Equal(t, test.want, test.in)
+	}
+}
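The table above pins down increment's semantics: the slice is a little-endian counter with carry, and it is allowed to wrap around to zero. Combined with base64 encoding it yields the fixed-width block IDs that uploadMultipart now uses. A standalone demonstration:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// increment treats xs as a little-endian (LSB-first) binary counter,
// copied from the function added in this diff.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			break // no carry into the next byte
		}
	}
}

func main() {
	id := make([]byte, 8) // all block IDs must be the same length
	for part := 0; part < 3; part++ {
		increment(id)
		fmt.Println(base64.StdEncoding.EncodeToString(id)) // AQAAAAAAAAA=, AgAAAAAAAAA=, ...
	}
}
```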
backend/b2/b2.go:

@@ -124,8 +124,13 @@ minimum size.`,
 			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
-			Name:     "disable_checksum",
-			Help:     `Disable checksums for large (> upload cutoff) files`,
+			Name: "disable_checksum",
+			Help: `Disable checksums for large (> upload cutoff) files
+
+Normally rclone will calculate the SHA1 checksum of the input before
+uploading it so it can add it to metadata on the object. This is great
+for data integrity checking but can cause long delays for large files
+to start uploading.`,
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -668,7 +673,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		}
 		remote := file.Name[len(prefix):]
 		// Check for directory
-		isDirectory := strings.HasSuffix(remote, "/")
+		isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 		if isDirectory {
 			remote = remote[:len(remote)-1]
 		}
@@ -1370,6 +1375,21 @@ func (o *Object) Size() int64 {
 	return o.size
 }

+// Clean the SHA1
+//
+// Make sure it is lower case
+//
+// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
+// Some tools (eg Cyberduck) use this
+func cleanSHA1(sha1 string) (out string) {
+	out = strings.ToLower(sha1)
+	const unverified = "unverified:"
+	if strings.HasPrefix(out, unverified) {
+		out = out[len(unverified):]
+	}
+	return out
+}
+
 // decodeMetaDataRaw sets the metadata from the data passed in
 //
 // Sets
@@ -1385,12 +1405,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if o.sha1 == "" || o.sha1 == "none" {
 		o.sha1 = Info[sha1Key]
 	}
-	// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
-	// Some tools (eg Cyberduck) use this
-	const unverified = "unverified:"
-	if strings.HasPrefix(o.sha1, unverified) {
-		o.sha1 = o.sha1[len(unverified):]
-	}
+	o.sha1 = cleanSHA1(o.sha1)
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
@@ -1648,6 +1663,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			o.sha1 = resp.Header.Get(sha1InfoHeader)
 			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
 		}
+		o.sha1 = cleanSHA1(o.sha1)
 	}
 	// Don't check length or hash on partial content
 	if resp.StatusCode == http.StatusPartialContent {
@@ -1816,6 +1832,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Method:  "POST",
 		RootURL: upload.UploadURL,
 		Body:    in,
		Options: options,
 		ExtraHeaders: map[string]string{
 			"Authorization":  upload.AuthorizationToken,
 			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
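cleanSHA1 (added above) centralises two normalisations that decodeMetaDataRaw previously did inline and that Open now needs as well: lower-casing, and stripping the "unverified:" prefix B2 attaches to client-supplied checksums. A standalone copy showing the effect:

```go
package main

import (
	"fmt"
	"strings"
)

// cleanSHA1 as added in this diff: lower-case the hash and strip the
// "unverified:" prefix that B2 attaches to client-supplied checksums.
func cleanSHA1(sha1 string) (out string) {
	out = strings.ToLower(sha1)
	const unverified = "unverified:"
	if strings.HasPrefix(out, unverified) {
		out = out[len(unverified):]
	}
	return out
}

func main() {
	fmt.Println(cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B"))
	// Output: 3f786850e387550fdab836ed7e6dc881de23001b
}
```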
backend/b2/upload.go:

@@ -184,13 +184,6 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
 	up.uploadMu.Unlock()
 }

-// clearUploadURL clears the current UploadURL and the AuthorizationToken
-func (up *largeUpload) clearUploadURL() {
-	up.uploadMu.Lock()
-	up.uploads = nil
-	up.uploadMu.Unlock()
-}
-
 // Transfer a chunk
 func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
 	err := up.f.pacer.Call(func() (bool, error) {
backend/box/box.go:

@@ -53,8 +53,7 @@ const (
 	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
 	minSleep                    = 10 * time.Millisecond
 	maxSleep                    = 2 * time.Second
-	decayConstant               = 2   // bigger for slower decay, exponential
-	rootID                      = "0" // ID of root folder is always this
+	decayConstant               = 2 // bigger for slower decay, exponential
 	rootURL                     = "https://api.box.com/2.0"
 	uploadURL                   = "https://upload.box.com/api/2.0"
 	listChunks                  = 1000 // chunk size to read directory listings
@@ -89,22 +88,7 @@ func init() {
 			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 			var err error
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				boxConfig, err := getBoxConfig(jsonFile)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				privateKey, err := getDecryptedPrivateKey(boxConfig)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				claims, err := getClaims(boxConfig, boxSubType)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				signingHeaders := getSigningHeaders(boxConfig)
-				queryParams := getQueryParams(boxConfig)
-				client := fshttp.NewClient(fs.Config)
-				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+				err = refreshJWTToken(jsonFile, boxSubType, name, m)
 				if err != nil {
 					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
 				}
@@ -121,6 +105,11 @@ func init() {
 		}, {
 			Name: config.ConfigClientSecret,
 			Help: "Box App Client Secret\nLeave blank normally.",
+		}, {
+			Name:     "root_folder_id",
+			Help:     "Fill in for rclone to use a non root folder as its starting point.",
+			Default:  "0",
+			Advanced: true,
 		}, {
 			Name: "box_config_file",
 			Help: "Box App config.json location\nLeave blank normally.",
@@ -163,6 +152,26 @@ func init() {
 	})
 }

+func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
+	boxConfig, err := getBoxConfig(jsonFile)
+	if err != nil {
+		log.Fatalf("Failed to configure token: %v", err)
+	}
+	privateKey, err := getDecryptedPrivateKey(boxConfig)
+	if err != nil {
+		log.Fatalf("Failed to configure token: %v", err)
+	}
+	claims, err := getClaims(boxConfig, boxSubType)
+	if err != nil {
+		log.Fatalf("Failed to configure token: %v", err)
+	}
+	signingHeaders := getSigningHeaders(boxConfig)
+	queryParams := getQueryParams(boxConfig)
+	client := fshttp.NewClient(fs.Config)
+	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+	return err
+}
+
 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	file, err := ioutil.ReadFile(configFile)
 	if err != nil {
@@ -185,7 +194,6 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
 		Iss: boxConfig.BoxAppSettings.ClientID,
 		Sub: boxConfig.EnterpriseID,
 		Aud: tokenURL,
-		Iat: time.Now().Unix(),
 		Exp: time.Now().Add(time.Second * 45).Unix(),
 		PrivateClaims: map[string]interface{}{
 			"box_sub_type": boxSubType,
@@ -236,6 +244,7 @@ type Options struct {
 	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
 	CommitRetries int                  `config:"commit_retries"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
+	RootFolderID  string               `config:"root_folder_id"`
 }

 // Fs represents a remote box
@@ -393,13 +402,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}).Fill(f)
 	f.srv.SetErrorHandler(errorHandler)

-	// Renew the token in the background
-	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, err := f.readMetaDataForPath(ctx, "")
-		return err
-	})
+	jsonFile, ok := m.Get("box_config_file")
+	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

-	// Get rootID
+	// If using box config.json and JWT, renewing should just refresh the token and
+	// should do so whether there are uploads pending or not.
+	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+			err := refreshJWTToken(jsonFile, boxSubType, name, m)
+			return err
+		})
+		f.tokenRenewer.Start()
+	} else {
+		// Renew the token in the background
+		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+			_, err := f.readMetaDataForPath(ctx, "")
+			return err
+		})
+	}
+
+	// Get rootFolderID
+	rootID := f.opt.RootFolderID
 	f.dirCache = dircache.New(root, rootID, f)

 	// Find the current root
@@ -1167,7 +1190,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // upload does a single non-multipart upload
 //
 // This is recommended for less than 50 MB of content
-func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
 	upload := api.UploadFile{
 		Name:              o.fs.opt.Enc.FromStandardName(leaf),
 		ContentModifiedAt: api.Time(modTime),
@@ -1186,6 +1209,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 		MultipartContentName: "contents",
 		MultipartFileName:    upload.Name,
 		RootURL:              uploadURL,
+		Options:              options,
 	}
 	// If object has an ID then it is existing so create a new version
 	if o.id != "" {
@@ -1227,9 +1251,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	// Upload with simple or multipart
 	if size <= int64(o.fs.opt.UploadCutoff) {
-		err = o.upload(ctx, in, leaf, directoryID, modTime)
+		err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
 	} else {
-		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
+		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
 	}
 	return err
 }

backend/box/upload.go:

@@ -54,7 +54,7 @@ func sha1Digest(digest []byte) string {
 }

 // uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
 	chunkSize := int64(len(chunk))
 	sha1sum := sha1.Sum(chunk)
 	opts := rest.Opts{
@@ -64,6 +64,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 		ContentType:   "application/octet-stream",
 		ContentLength: &chunkSize,
 		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
+		Options:       options,
 		ExtraHeaders: map[string]string{
 			"Digest": sha1Digest(sha1sum[:]),
 		},
@@ -171,7 +172,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 }

 // uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
 	// Create upload session
 	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 	if err != nil {
@@ -236,7 +237,7 @@ outer:
 			defer wg.Done()
 			defer o.fs.uploadToken.Put()
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
+			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 			if err != nil {
 				err = errors.Wrap(err, "multipart upload failed to upload part")
 				select {
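All of the box changes above follow one pattern: accept variadic fs.OpenOption arguments and carry them into rest.Opts via its Options field, so per-transfer HTTP options reach the actual upload requests. A runnable toy of the same plumbing with simplified stand-in types (Option and opts here are illustrative, not rclone's real fs.OpenOption and rest.Opts):

```go
package main

import "fmt"

// Option stands in for fs.OpenOption: something that yields an HTTP header.
type Option interface{ Header() (key, value string) }

type xHeader struct{ k, v string }

func (h xHeader) Header() (string, string) { return h.k, h.v }

// opts stands in for rest.Opts: per-request options travel with the
// request and are applied as headers just before it is sent.
type opts struct {
	Method  string
	Options []Option
}

func send(o opts) {
	for _, opt := range o.Options {
		k, v := opt.Header()
		fmt.Printf("%s header %s: %s\n", o.Method, k, v)
	}
}

// upload mirrors the diff's pattern: accept variadic options, pass them on.
func upload(options ...Option) {
	send(opts{Method: "POST", Options: options})
}

func main() {
	upload(xHeader{"X-Trace-Id", "abc123"})
}
```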
backend/cache/cache.go (vendored, 27 lines changed):

@@ -65,6 +65,7 @@ func init() {
 		Name:        "cache",
 		Description: "Cache a remote",
 		NewFs:       NewFs,
+		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -1882,6 +1883,31 @@ func (f *Fs) Disconnect(ctx context.Context) error {
 	return do(ctx)
 }

+var commandHelp = []fs.CommandHelp{
+	{
+		Name:  "stats",
+		Short: "Print stats on the cache backend in JSON format.",
+	},
+}
+
+// Command the backend to run a named command
+//
+// The command run is name
+// args may be used to read arguments from
+// opts may be used to read optional arguments from
+//
+// The result should be capable of being JSON encoded
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+	switch name {
+	case "stats":
+		return f.Stats()
+	default:
+		return nil, fs.ErrorCommandNotFound
+	}
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
@@ -1899,4 +1925,5 @@ var (
 	_ fs.Abouter      = (*Fs)(nil)
 	_ fs.UserInfoer   = (*Fs)(nil)
 	_ fs.Disconnecter = (*Fs)(nil)
+	_ fs.Commander    = (*Fs)(nil)
 )
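With fs.Commander satisfied, the cache backend's stats become reachable through rclone's generic backend-command plumbing (surfaced on the command line as `rclone backend stats cache:`). A runnable sketch of how a caller drives the interface; fakeFs is a stand-in so the snippet runs on its own, and only the Command signature is taken from the diff:

```go
package main

import (
	"context"
	"fmt"
)

// Commander mirrors the fs.Commander shape that *cache.Fs now satisfies;
// the real interface lives in rclone's fs package.
type Commander interface {
	Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error)
}

// fakeFs stands in for *cache.Fs so the sketch is self-contained.
type fakeFs struct{}

func (fakeFs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	if name == "stats" {
		return map[string]int{"chunks": 42}, nil // a real Fs returns f.Stats()
	}
	return nil, fmt.Errorf("command %q not found", name) // mirrors fs.ErrorCommandNotFound
}

func main() {
	out, err := fakeFs{}.Command(context.Background(), "stats", nil, nil)
	fmt.Println(out, err) // map[chunks:42] <nil>
}
```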
backend/cache/storage_persistent.go (vendored, 22 lines changed):

@@ -980,15 +980,6 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	})
 }

-// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
-// TO BE USED IN TESTING ONLY
-func (b *Persistent) SetPendingUploadToStarted(remote string) error {
-	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
-		item.Started = true
-		return nil
-	})
-}
-
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
 func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
@@ -1036,19 +1027,6 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 	})
 }

-// PurgeTempUploads will remove all the pending uploads from the queue
-// TO BE USED IN TESTING ONLY
-func (b *Persistent) PurgeTempUploads() {
-	b.tempQueueMux.Lock()
-	defer b.tempQueueMux.Unlock()
-
-	_ = b.db.Update(func(tx *bolt.Tx) error {
-		_ = tx.DeleteBucket([]byte(tempBucket))
-		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
-		return nil
-	})
-}
-
 // Close should be called when the program ends gracefully
 func (b *Persistent) Close() {
 	b.cleanupMux.Lock()
backend/cache/utils_test.go (vendored, new file, 23 lines):

@@ -0,0 +1,23 @@
+package cache
+
+import bolt "go.etcd.io/bbolt"
+
+// PurgeTempUploads will remove all the pending uploads from the queue
+func (b *Persistent) PurgeTempUploads() {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	_ = b.db.Update(func(tx *bolt.Tx) error {
+		_ = tx.DeleteBucket([]byte(tempBucket))
+		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
+		return nil
+	})
+}
+
+// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
+func (b *Persistent) SetPendingUploadToStarted(remote string) error {
+	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
+		item.Started = true
+		return nil
+	})
+}
backend/crypt/cipher.go:

@@ -71,30 +71,6 @@ type ReadSeekCloser interface {
 // OpenRangeSeek opens the file handle at the offset with the limit given
 type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

-// Cipher is used to swap out the encryption implementations
-type Cipher interface {
-	// EncryptFileName encrypts a file path
-	EncryptFileName(string) string
-	// DecryptFileName decrypts a file path, returns error if decrypt was invalid
-	DecryptFileName(string) (string, error)
-	// EncryptDirName encrypts a directory path
-	EncryptDirName(string) string
-	// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
-	DecryptDirName(string) (string, error)
-	// EncryptData
-	EncryptData(io.Reader) (io.Reader, error)
-	// DecryptData
-	DecryptData(io.ReadCloser) (io.ReadCloser, error)
-	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
-	// EncryptedSize calculates the size of the data when encrypted
-	EncryptedSize(int64) int64
-	// DecryptedSize calculates the size of the data when decrypted
-	DecryptedSize(int64) (int64, error)
-	// NameEncryptionMode returns the used mode for name handling
-	NameEncryptionMode() NameEncryptionMode
-}
-
 // NameEncryptionMode is the type of file name encryption in use
 type NameEncryptionMode int

@@ -136,7 +112,8 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }

-type cipher struct {
+// Cipher defines an encoding and decoding cipher for the crypt backend
+type Cipher struct {
 	dataKey   [32]byte                  // Key for secretbox
 	nameKey   [32]byte                  // 16,24 or 32 bytes
 	nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -148,8 +125,8 @@ type cipher struct {
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
-	c := &cipher{
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
+	c := &Cipher{
 		mode:           mode,
 		cryptoRand:     rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
@@ -172,7 +149,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 //
 // Note that empty passsword makes all 0x00 keys which is used in the
 // tests.
-func (c *cipher) Key(password, salt string) (err error) {
+func (c *Cipher) Key(password, salt string) (err error) {
 	const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
 	var saltBytes = defaultSalt
 	if salt != "" {
@@ -196,12 +173,12 @@ func (c *cipher) Key(password, salt string) (err error) {
 }

 // getBlock gets a block from the pool of size blockSize
-func (c *cipher) getBlock() []byte {
+func (c *Cipher) getBlock() []byte {
 	return c.buffers.Get().([]byte)
 }

 // putBlock returns a block to the pool of size blockSize
-func (c *cipher) putBlock(buf []byte) {
+func (c *Cipher) putBlock(buf []byte) {
 	if len(buf) != blockSize {
 		panic("bad blocksize returned to pool")
 	}
@@ -246,7 +223,7 @@ func decodeFileName(in string) ([]byte, error) {
 // This means that
 // * filenames with the same name will encrypt the same
 // * filenames which start the same won't have a common prefix
-func (c *cipher) encryptSegment(plaintext string) string {
+func (c *Cipher) encryptSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
 	}
@@ -256,7 +233,7 @@ func (c *cipher) encryptSegment(plaintext string) string {
 }

 // decryptSegment decrypts a path segment
-func (c *cipher) decryptSegment(ciphertext string) (string, error) {
+func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
@@ -283,7 +260,7 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 }

 // Simple obfuscation routines
-func (c *cipher) obfuscateSegment(plaintext string) string {
+func (c *Cipher) obfuscateSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
 	}
@@ -370,7 +347,7 @@ func (c *cipher) obfuscateSegment(plaintext string) string {
 	return result.String()
 }

-func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
+func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
@@ -457,7 +434,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 }

 // encryptFileName encrypts a file path
-func (c *cipher) encryptFileName(in string) string {
+func (c *Cipher) encryptFileName(in string) string {
 	segments := strings.Split(in, "/")
 	for i := range segments {
 		// Skip directory name encryption if the user chose to
@@ -475,7 +452,7 @@ func (c *cipher) encryptFileName(in string) string {
 }

 // EncryptFileName encrypts a file path
-func (c *cipher) EncryptFileName(in string) string {
+func (c *Cipher) EncryptFileName(in string) string {
 	if c.mode == NameEncryptionOff {
 		return in + encryptedSuffix
 	}
@@ -483,7 +460,7 @@ func (c *cipher) EncryptFileName(in string) string {
 }

 // EncryptDirName encrypts a directory path
-func (c *cipher) EncryptDirName(in string) string {
+func (c *Cipher) EncryptDirName(in string) string {
 	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
 		return in
 	}
@@ -491,7 +468,7 @@ func (c *cipher) EncryptDirName(in string) string {
 }

 // decryptFileName decrypts a file path
-func (c *cipher) decryptFileName(in string) (string, error) {
+func (c *Cipher) decryptFileName(in string) (string, error) {
 	segments := strings.Split(in, "/")
 	for i := range segments {
 		var err error
@@ -514,7 +491,7 @@ func (c *cipher) decryptFileName(in string) (string, error) {
 }

 // DecryptFileName decrypts a file path
-func (c *cipher) DecryptFileName(in string) (string, error) {
+func (c *Cipher) DecryptFileName(in string) (string, error) {
 	if c.mode == NameEncryptionOff {
 		remainingLength := len(in) - len(encryptedSuffix)
 		if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -526,14 +503,15 @@ func (c *cipher) DecryptFileName(in string) (string, error) {
 }

 // DecryptDirName decrypts a directory path
-func (c *cipher) DecryptDirName(in string) (string, error) {
+func (c *Cipher) DecryptDirName(in string) (string, error) {
 	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
 		return in, nil
 	}
 	return c.decryptFileName(in)
 }

-func (c *cipher) NameEncryptionMode() NameEncryptionMode {
+// NameEncryptionMode returns the encryption mode in use for names
+func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
 	return c.mode
 }
@@ -601,7 +579,7 @@ func (n *nonce) add(x uint64) {
 type encrypter struct {
 	mu      sync.Mutex
 	in      io.Reader
-	c       *cipher
+	c       *Cipher
 	nonce   nonce
 	buf     []byte
 	readBuf []byte
@@ -611,7 +589,7 @@ type encrypter struct {
 }

 // newEncrypter creates a new file handle encrypting on the fly
-func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
+func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
 	fh := &encrypter{
 		in: in,
 		c:  c,
@@ -683,13 +661,19 @@ func (fh *encrypter) finish(err error) (int, error) {
 }

-// Encrypt data encrypts the data stream
-func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
+func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
 	in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
 	out, err := c.newEncrypter(in, nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	return wrap(out), nil // and wrap the accounting back on
+	return wrap(out), out, nil // and wrap the accounting back on
+}
+
+// EncryptData encrypts the data stream
+func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
+	out, _, err := c.encryptData(in)
+	return out, err
 }

 // decrypter decrypts an io.ReaderCloser on the fly
@@ -698,7 +682,7 @@ type decrypter struct {
 	rc           io.ReadCloser
 	nonce        nonce
 	initialNonce nonce
-	c            *cipher
+	c            *Cipher
 	buf          []byte
 	readBuf      []byte
 	bufIndex     int
@@ -709,7 +693,7 @@ type decrypter struct {
 }

 // newDecrypter creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
+func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 	fh := &decrypter{
 		rc: rc,
 		c:  c,
@@ -737,7 +721,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }

 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
@@ -1012,7 +996,7 @@ func (fh *decrypter) finishAndClose(err error) error {
 }

 // DecryptData decrypts the data stream
-func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
+func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 	out, err := c.newDecrypter(rc)
 	if err != nil {
 		return nil, err
@@ -1025,7 +1009,7 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
 	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
 	if err != nil {
 		return nil, err
@@ -1034,7 +1018,7 @@ func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
 }

 // EncryptedSize calculates the size of the data when encrypted
-func (c *cipher) EncryptedSize(size int64) int64 {
+func (c *Cipher) EncryptedSize(size int64) int64 {
 	blocks, residue := size/blockDataSize, size%blockDataSize
 	encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
 	if residue != 0 {
@@ -1044,7 +1028,7 @@ func (c *cipher) EncryptedSize(size int64) int64 {
 }

 // DecryptedSize calculates the size of the data when decrypted
-func (c *cipher) DecryptedSize(size int64) (int64, error) {
+func (c *Cipher) DecryptedSize(size int64) (int64, error) {
 	size -= int64(fileHeaderSize)
 	if size < 0 {
 		return 0, ErrorEncryptedFileTooShort
@@ -1063,7 +1047,6 @@ func (c *cipher) DecryptedSize(size int64) (int64, error) {

 // check interfaces
 var (
-	_ Cipher         = (*cipher)(nil)
 	_ io.ReadCloser  = (*decrypter)(nil)
 	_ io.Seeker      = (*decrypter)(nil)
 	_ fs.RangeSeeker = (*decrypter)(nil)
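Since Cipher is now a concrete exported struct, callers can hold a *Cipher directly instead of the removed interface. A hedged, test-shaped sketch of a name round trip using only functions visible in this diff (it would live inside package crypt, since newCipher is unexported; external users would construct one via NewCipher):

```go
package crypt

import "testing"

// Sketch only: same shape as the existing cipher tests in this diff.
func TestCipherNameRoundTrip(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "correct horse", "", true) // "" salt uses the built-in default
	if err != nil {
		t.Fatal(err)
	}
	enc := c.EncryptFileName("docs/report.txt") // deterministic per path segment
	dec, err := c.DecryptFileName(enc)
	if err != nil || dec != "docs/report.txt" {
		t.Fatalf("round trip failed: got %q, err %v", dec, err)
	}
}
```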
backend/crypt/cipher_test.go

@@ -12,6 +12,7 @@ import (

     "github.com/pkg/errors"
     "github.com/rclone/rclone/backend/crypt/pkcs7"
+    "github.com/rclone/rclone/lib/readers"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -784,7 +785,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
     c, err := newCipher(NameEncryptionStandard, "", "", true)
     assert.NoError(t, err)

-    in := &errorReader{io.ErrUnexpectedEOF}
+    in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
     fh, err := c.newEncrypter(in, nil)
     assert.NoError(t, err)

@@ -793,14 +794,6 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
     assert.Equal(t, int64(32), n)
 }

-type errorReader struct {
-    err error
-}
-
-func (er errorReader) Read(p []byte) (n int, err error) {
-    return 0, er.err
-}
-
 type closeDetector struct {
     io.Reader
     closed int
@@ -838,7 +831,7 @@ func TestNewDecrypter(t *testing.T) {
         assert.Equal(t, 1, cd.closed)
     }

-    er := &errorReader{errors.New("potato")}
+    er := &readers.ErrorReader{Err: errors.New("potato")}
     cd = newCloseDetector(er)
     fh, err = c.newDecrypter(cd)
     assert.Nil(t, fh)
@@ -864,7 +857,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
     c, err := newCipher(NameEncryptionStandard, "", "", true)
     assert.NoError(t, err)

-    in2 := &errorReader{io.ErrUnexpectedEOF}
+    in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
     in1 := bytes.NewBuffer(file16)
     in := ioutil.NopCloser(io.MultiReader(in1, in2))

@@ -1118,7 +1111,7 @@ func TestDecrypterRead(t *testing.T) {

     // Test producing an error on the file on Read the underlying file
     in1 := bytes.NewBuffer(file1)
-    in2 := &errorReader{errors.New("potato")}
+    in2 := &readers.ErrorReader{Err: errors.New("potato")}
     in := io.MultiReader(in1, in2)
     cd := newCloseDetector(in)
     fh, err := c.newDecrypter(cd)
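For context on the test changes above: both the deleted local errorReader and its replacement deliver an error on every Read, which lets a test inject a failure at any point in a stream. A self-contained sketch of the pattern follows; the real lib/readers.ErrorReader is assumed to look like this, based only on how the diff uses it.

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

// ErrorReader returns its Err on every Read call.
type ErrorReader struct {
    Err error
}

func (er ErrorReader) Read(p []byte) (int, error) {
    return 0, er.Err
}

func main() {
    // Deliver 5 good bytes, then fail: how the tests inject mid-stream errors.
    r := io.MultiReader(strings.NewReader("hello"), ErrorReader{errors.New("potato")})
    buf := make([]byte, 16)
    for {
        n, err := r.Read(buf)
        fmt.Printf("read %d bytes, err=%v\n", n, err)
        if err != nil {
            break
        }
    }
}

Moving the helper into lib/readers lets every backend's tests share one implementation instead of redefining it locally, which is all these hunks do.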
backend/crypt/crypt.go

@@ -26,6 +26,7 @@ func init() {
         Name:        "crypt",
         Description: "Encrypt/Decrypt a remote",
         NewFs:       NewFs,
+        CommandHelp: commandHelp,
         Options: []fs.Option{{
             Name: "remote",
             Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -90,7 +91,7 @@ names, or for debugging purposes.`,
 }

 // newCipherForConfig constructs a Cipher for the given config name
-func newCipherForConfig(opt *Options) (Cipher, error) {
+func newCipherForConfig(opt *Options) (*Cipher, error) {
     mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
     if err != nil {
         return nil, err
@@ -117,7 +118,7 @@ func newCipherForConfig(opt *Options) (Cipher, error) {
 }

 // NewCipher constructs a Cipher for the given config
-func NewCipher(m configmap.Mapper) (Cipher, error) {
+func NewCipher(m configmap.Mapper) (*Cipher, error) {
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -203,7 +204,7 @@ type Fs struct {
     root     string
     opt      Options
     features *fs.Features // optional features
-    cipher   Cipher
+    cipher   *Cipher
 }

 // Name of the remote (as passed into NewFs)
@@ -327,7 +328,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     // Encrypt the data into wrappedIn
-    wrappedIn, err := f.cipher.EncryptData(in)
+    wrappedIn, encrypter, err := f.cipher.encryptData(in)
     if err != nil {
         return nil, err
     }
@@ -351,7 +352,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
     }

     // Transfer the data
-    o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
+    o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
     if err != nil {
         return nil, err
     }
@@ -504,11 +505,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
     if do == nil {
         return nil, errors.New("can't PutUnchecked")
     }
-    wrappedIn, err := f.cipher.EncryptData(in)
+    wrappedIn, encrypter, err := f.cipher.encryptData(in)
     if err != nil {
         return nil, err
     }
-    o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
+    o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
     if err != nil {
         return nil, err
     }
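The reason put now threads the encrypter back to the caller: the nonce is chosen when the encrypter is created, and exactly that nonce must be reused to reproduce the ciphertext later. A toy sketch (not rclone code) of the property this relies on, using NaCl secretbox as the crypt format itself does:

package main

import (
    "bytes"
    "crypto/md5"
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    var key [32]byte
    var nonce [24]byte
    copy(key[:], "0123456789abcdef0123456789abcdef")
    copy(nonce[:], "the saved per-file nonce")

    plaintext := []byte("hello world")
    // same key + same nonce -> byte-identical ciphertext
    c1 := secretbox.Seal(nil, plaintext, &nonce, &key)
    c2 := secretbox.Seal(nil, plaintext, &nonce, &key)

    fmt.Println(bytes.Equal(c1, c2)) // true: deterministic
    fmt.Printf("%x\n", md5.Sum(c1))  // so a ciphertext hash is reproducible
}

Saving the nonce in the ObjectInfo at upload time is what makes the hash computation added later in this diff possible without a second network round trip.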
@@ -561,6 +562,37 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
     return f.cipher.DecryptFileName(encryptedFileName)
 }

+// computeHashWithNonce takes the nonce and encrypts the contents of
+// src with it, and calculates the hash given by HashType on the fly
+//
+// Note that we break lots of encapsulation in this function.
+func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+    // Open the src for input
+    in, err := src.Open(ctx)
+    if err != nil {
+        return "", errors.Wrap(err, "failed to open src")
+    }
+    defer fs.CheckClose(in, &err)
+
+    // Now encrypt the src with the nonce
+    out, err := f.cipher.newEncrypter(in, &nonce)
+    if err != nil {
+        return "", errors.Wrap(err, "failed to make encrypter")
+    }
+
+    // pipe into hash
+    m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
+    if err != nil {
+        return "", errors.Wrap(err, "failed to make hasher")
+    }
+    _, err = io.Copy(m, out)
+    if err != nil {
+        return "", errors.Wrap(err, "failed to hash data")
+    }
+
+    return m.Sums()[hashType], nil
+}
+
 // ComputeHash takes the nonce from o, and encrypts the contents of
 // src with it, and calculates the hash given by HashType on the fly
 //
@@ -572,7 +604,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
     if err != nil {
         return "", errors.Wrap(err, "failed to open object to read nonce")
     }
-    d, err := f.cipher.(*cipher).newDecrypter(in)
+    d, err := f.cipher.newDecrypter(in)
     if err != nil {
         _ = in.Close()
         return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -597,30 +629,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
         return "", errors.Wrap(err, "failed to close nonce read")
     }

-    // Open the src for input
-    in, err = src.Open(ctx)
-    if err != nil {
-        return "", errors.Wrap(err, "failed to open src")
-    }
-    defer fs.CheckClose(in, &err)
-
-    // Now encrypt the src with the nonce
-    out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
-    if err != nil {
-        return "", errors.Wrap(err, "failed to make encrypter")
-    }
-
-    // pipe into hash
-    m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
-    if err != nil {
-        return "", errors.Wrap(err, "failed to make hasher")
-    }
-    _, err = io.Copy(m, out)
-    if err != nil {
-        return "", errors.Wrap(err, "failed to hash data")
-    }
-
-    return m.Sums()[hashType], nil
+    return f.computeHashWithNonce(ctx, nonce, src, hashType)
 }

 // MergeDirs merges the contents of all the directories passed
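Taken together, the two hunks above split ComputeHash into "read the nonce from the object header" plus the shared computeHashWithNonce. A toy sketch (not rclone code) of the end-to-end idea; the 8 byte magic plus 24 byte nonce header layout is an assumption here, and the single Seal call stands in for the real per-block encryption:

package main

import (
    "bytes"
    "crypto/md5"
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

// encrypt writes a header (magic + nonce) followed by the sealed data.
func encrypt(plaintext []byte, nonce [24]byte, key [32]byte) []byte {
    header := append([]byte("RCLONE\x00\x00"), nonce[:]...)
    return secretbox.Seal(header, plaintext, &nonce, &key)
}

func main() {
    var key [32]byte
    var nonce [24]byte
    copy(nonce[:], "per-file random nonce...")

    stored := encrypt([]byte("file contents"), nonce, key)

    // later: recover the nonce from the stored header ...
    var got [24]byte
    copy(got[:], stored[8:32])

    // ... re-encrypt the local candidate and compare ciphertext digests
    candidate := encrypt([]byte("file contents"), got, key)
    fmt.Println(bytes.Equal(stored, candidate))        // true
    fmt.Println(md5.Sum(stored) == md5.Sum(candidate)) // true
}

The point of the dedup is that the same re-encrypt-and-hash step can now also be driven from ObjectInfo.Hash at upload time, not only from the standalone ComputeHash path.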
@@ -692,6 +701,67 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
     do(ctx, wrappedNotifyFunc, pollIntervalChan)
 }

+var commandHelp = []fs.CommandHelp{
+    {
+        Name:  "encode",
+        Short: "Encode the given filename(s)",
+        Long: `This encodes the filenames given as arguments returning a list of
+strings of the encoded results.
+
+Usage Example:
+
+    rclone backend encode crypt: file1 [file2...]
+    rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+`,
+    },
+    {
+        Name:  "decode",
+        Short: "Decode the given filename(s)",
+        Long: `This decodes the filenames given as arguments returning a list of
+strings of the decoded results. It will return an error if any of the
+inputs are invalid.
+
+Usage Example:
+
+    rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+    rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+`,
+    },
+}
+
+// Command the backend to run a named command
+//
+// The command run is name
+// args may be used to read arguments from
+// opts may be used to read optional arguments from
+//
+// The result should be capable of being JSON encoded
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+    switch name {
+    case "decode":
+        out := make([]string, 0, len(arg))
+        for _, encryptedFileName := range arg {
+            fileName, err := f.DecryptFileName(encryptedFileName)
+            if err != nil {
+                return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
+            }
+            out = append(out, fileName)
+        }
+        return out, nil
+    case "encode":
+        out := make([]string, 0, len(arg))
+        for _, fileName := range arg {
+            encryptedFileName := f.EncryptFileName(fileName)
+            out = append(out, encryptedFileName)
+        }
+        return out, nil
+    default:
+        return nil, fs.ErrorCommandNotFound
+    }
+}
+
 // Object describes a wrapped for being read from the Fs
 //
 // This decrypts the remote name and decrypts the data
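The new Command method follows rclone's generic backend-command shape: positional args in, a JSON-encodable result out, fs.ErrorCommandNotFound for anything unrecognised. Below is a stand-alone sketch of that dispatch pattern; the base32 transform is a dummy standing in for the real filename cipher (though crypt's standard mode does use unpadded lowercase base32 for names).

package main

import (
    "encoding/base32"
    "errors"
    "fmt"
    "strings"
)

var errCommandNotFound = errors.New("command not found")

var enc = base32.StdEncoding.WithPadding(base32.NoPadding)

// command dispatches a named backend command over positional args.
func command(name string, arg []string) (interface{}, error) {
    switch name {
    case "encode":
        out := make([]string, 0, len(arg))
        for _, fileName := range arg {
            out = append(out, strings.ToLower(enc.EncodeToString([]byte(fileName))))
        }
        return out, nil
    case "decode":
        out := make([]string, 0, len(arg))
        for _, encrypted := range arg {
            plain, err := enc.DecodeString(strings.ToUpper(encrypted))
            if err != nil {
                return out, fmt.Errorf("failed to decode %q: %w", encrypted, err)
            }
            out = append(out, string(plain))
        }
        return out, nil
    default:
        return nil, errCommandNotFound
    }
}

func main() {
    encoded, _ := command("encode", []string{"file1.txt"})
    fmt.Println(encoded)
    decoded, _ := command("decode", encoded.([]string))
    fmt.Println(decoded)
}

Returning []string means the CLI can print results directly, while anything richer is JSON-encoded for the user, as the doc comment above spells out.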
@@ -833,13 +903,15 @@ func (f *Fs) Disconnect(ctx context.Context) error {
 // This encrypts the remote name and adjusts the size
 type ObjectInfo struct {
     fs.ObjectInfo
-    f *Fs
+    f     *Fs
+    nonce nonce
 }

-func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
+func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
     return &ObjectInfo{
         ObjectInfo: src,
         f:          f,
+        nonce:      nonce,
     }
 }

@@ -865,6 +937,23 @@ func (o *ObjectInfo) Size() int64 {
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
+    var srcObj fs.Object
+    var ok bool
+    // Get the underlying object if there is one
+    if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
+        // Prefer direct interface assertion
+    } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
+        // Otherwise likely is a operations.OverrideRemote
+        srcObj = do.UnWrap()
+    } else {
+        return "", nil
+    }
+    // if this is wrapping a local object then we work out the hash
+    if srcObj.Fs().Features().IsLocal {
+        // Read the data and encrypt it to calculate the hash
+        fs.Debugf(o, "Computing %v hash of encrypted source", hash)
+        return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
+    }
     return "", nil
 }
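The unwrap dance at the top of ObjectInfo.Hash above is a general Go pattern: try the direct type assertion first, then fall back to an UnWrap method for wrapper types such as operations.OverrideRemote. A simplified, self-contained sketch with stand-in interfaces for fs.Object and fs.ObjectUnWrapper:

package main

import "fmt"

type Object interface{ Remote() string }

type UnWrapper interface{ UnWrap() Object }

type realObject struct{ name string }

func (o realObject) Remote() string { return o.name }

// override wraps an Object, e.g. to present it under a different name.
type override struct {
    inner Object
}

func (o override) UnWrap() Object { return o.inner }

// underlying recovers the real Object behind v, if any.
func underlying(v interface{}) Object {
    if obj, ok := v.(Object); ok {
        return obj // prefer the direct assertion
    }
    if w, ok := v.(UnWrapper); ok {
        return w.UnWrap() // otherwise unwrap one layer
    }
    return nil
}

func main() {
    obj := realObject{"file.txt"}
    fmt.Println(underlying(obj).Remote())           // direct
    fmt.Println(underlying(override{obj}).Remote()) // via UnWrap
}

Hash only does the expensive re-encryption when the unwrapped source is local (IsLocal above), so remote-to-remote copies are not penalised.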
@@ -903,6 +992,7 @@ var (
     _ fs.Copier         = (*Fs)(nil)
     _ fs.Mover          = (*Fs)(nil)
     _ fs.DirMover       = (*Fs)(nil)
+    _ fs.Commander      = (*Fs)(nil)
     _ fs.PutUncheckeder = (*Fs)(nil)
     _ fs.PutStreamer    = (*Fs)(nil)
     _ fs.CleanUpper     = (*Fs)(nil)

backend/crypt/crypt_internal_test.go (new file, 143 lines)

@@ -0,0 +1,143 @@
+package crypt
+
+import (
+    "bytes"
+    "context"
+    "crypto/md5"
+    "fmt"
+    "io"
+    "testing"
+    "time"
+
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fs/object"
+    "github.com/rclone/rclone/lib/random"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+type testWrapper struct {
+    fs.ObjectInfo
+}
+
+// UnWrap returns the Object that this Object is wrapping or nil if it
+// isn't wrapping anything
+func (o testWrapper) UnWrap() fs.Object {
+    if o, ok := o.ObjectInfo.(fs.Object); ok {
+        return o
+    }
+    return nil
+}
+
+// Create a temporary local fs to upload things from
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
+    localFs, err := fs.TemporaryLocalFs()
+    require.NoError(t, err)
+    cleanup = func() {
+        require.NoError(t, localFs.Rmdir(context.Background(), ""))
+    }
+    return localFs, cleanup
+}
+
+// Upload a file to a remote
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
+    inBuf := bytes.NewBufferString(contents)
+    t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
+    upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
+    obj, err := f.Put(context.Background(), inBuf, upSrc)
+    require.NoError(t, err)
+    cleanup = func() {
+        require.NoError(t, obj.Remove(context.Background()))
+    }
+    return obj, cleanup
+}
+
+// Test the ObjectInfo
+func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
+    var (
+        contents = random.String(100)
+        path     = "hash_test_object"
+        ctx      = context.Background()
+    )
+    if wrap {
+        path = "_wrap"
+    }
+
+    localFs, cleanupLocalFs := makeTempLocalFs(t)
+    defer cleanupLocalFs()
+
+    obj, cleanupObj := uploadFile(t, localFs, path, contents)
+    defer cleanupObj()
+
+    // encrypt the data
+    inBuf := bytes.NewBufferString(contents)
+    var outBuf bytes.Buffer
+    enc, err := f.cipher.newEncrypter(inBuf, nil)
+    require.NoError(t, err)
+    nonce := enc.nonce // read the nonce at the start
+    _, err = io.Copy(&outBuf, enc)
+    require.NoError(t, err)
+
+    var oi fs.ObjectInfo = obj
+    if wrap {
+        // wrap the object in a fs.ObjectUnwrapper if required
+        oi = testWrapper{oi}
+    }
+
+    // wrap the object in a crypt for upload using the nonce we
+    // saved from the encryptor
+    src := f.newObjectInfo(oi, nonce)
+
+    // Test ObjectInfo methods
+    assert.Equal(t, int64(outBuf.Len()), src.Size())
+    assert.Equal(t, f, src.Fs())
+    assert.NotEqual(t, path, src.Remote())
+
+    // Test ObjectInfo.Hash
+    wantHash := md5.Sum(outBuf.Bytes())
+    gotHash, err := src.Hash(ctx, hash.MD5)
+    require.NoError(t, err)
+    assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
+}
+
+func testComputeHash(t *testing.T, f *Fs) {
+    var (
+        contents = random.String(100)
+        path     = "compute_hash_test"
+        ctx      = context.Background()
+        hashType = f.Fs.Hashes().GetOne()
+    )
+
+    if hashType == hash.None {
+        t.Skipf("%v: does not support hashes", f.Fs)
+    }
+
+    localFs, cleanupLocalFs := makeTempLocalFs(t)
+    defer cleanupLocalFs()
+
+    // Upload a file to localFs as a test object
+    localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
+    defer cleanupLocalObj()
+
+    // Upload the same data to the remote Fs also
+    remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
+    defer cleanupRemoteObj()
+
+    // Calculate the expected Hash of the remote object
+    computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
+    require.NoError(t, err)
+
+    // Test computed hash matches remote object hash
+    remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
+    require.NoError(t, err)
+    assert.Equal(t, remoteObjHash, computedHash)
+}
+
+// InternalTest is called by fstests.Run to extra tests
+func (f *Fs) InternalTest(t *testing.T) {
+    t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
+    t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
+    t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
+}
backend/drive/drive.go (613 changed lines, Normal file → Executable file)

@@ -29,6 +29,7 @@ import (

     "github.com/pkg/errors"
     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/cache"
     "github.com/rclone/rclone/fs/config"
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/configstruct"
@@ -54,6 +55,7 @@ const (
     rcloneClientID              = "202264815644.apps.googleusercontent.com"
     rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
     driveFolderType             = "application/vnd.google-apps.folder"
+    shortcutMimeType            = "application/vnd.google-apps.shortcut"
     timeFormatIn                = time.RFC3339
     timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
     defaultMinSleep             = fs.Duration(100 * time.Millisecond)
@@ -65,7 +67,7 @@ const (
     // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
     minChunkSize     = 256 * fs.KibiByte
     defaultChunkSize = 8 * fs.MebiByte
-    partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
+    partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
 )

 // Globals
@@ -157,6 +159,7 @@ func init() {
         Name:        "drive",
         Description: "Google Drive",
         NewFs:       NewFs,
+        CommandHelp: commandHelp,
         Config: func(name string, m configmap.Mapper) {
             ctx := context.TODO()
             // Parse config into Options struct
@@ -397,7 +400,7 @@ will download it anyway.`,
             Default: false,
             Help: `Show sizes as storage quota usage, not actual size.

-Show the size of a file as the the storage quota used. This is the
+Show the size of a file as the storage quota used. This is the
 current version plus any older versions that have been set to keep
 forever.

@@ -467,6 +470,16 @@ Google don't document so it may break in the future.

 See: https://github.com/rclone/rclone/issues/3857
 `,
             Advanced: true,
+        }, {
+            Name: "skip_shortcuts",
+            Help: `If set skip shortcut files
+
+Normally rclone dereferences shortcut files making them appear as if
+they are the original file (see [the shortcuts section](#shortcuts)).
+If this flag is set then rclone will ignore shortcut files completely.
+`,
+            Advanced: true,
+            Default:  false,
         }, {
             Name:     config.ConfigEncoding,
             Help:     config.ConfigEncodingHelp,
@@ -524,6 +537,7 @@ type Options struct {
     ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
     DisableHTTP2              bool                 `config:"disable_http2"`
     StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
+    SkipShortcuts             bool                 `config:"skip_shortcuts"`
     Enc                       encoder.MultiEncoder `config:"encoding"`
 }

@@ -542,6 +556,8 @@ type Fs struct {
     exportExtensions []string // preferred extensions to download docs
     importMimeTypes  []string // MIME types to convert to docs
     isTeamDrive      bool     // true if this is a team drive
+    fileFields       googleapi.Field // fields to fetch file info with
+    m                configmap.Mapper
 }

 type baseObject struct {
@@ -551,6 +567,7 @@ type baseObject struct {
     modifiedDate string // RFC3339 time it was last modified
     mimeType     string // The object MIME type
     bytes        int64  // size of the object
+    parents      int    // number of parents
 }
 type documentObject struct {
     baseObject
@@ -616,6 +633,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
                 return false, fserrors.FatalError(err)
             }
             return true, err
+        } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
+            fs.Errorf(f, "Received team drive file limit error: %v", err)
+            return false, fserrors.FatalError(err)
         }
     }
 }
@@ -642,17 +662,21 @@ func containsString(slice []string, s string) bool {
     return false
 }

-// getRootID returns the canonical ID for the "root" ID
-func (f *Fs) getRootID() (string, error) {
-    var info *drive.File
-    var err error
+// getFile returns drive.File for the ID passed and fields passed in
+func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
     err = f.pacer.CallNoRetry(func() (bool, error) {
-        info, err = f.svc.Files.Get("root").
-            Fields("id").
+        info, err = f.svc.Files.Get(ID).
+            Fields(fields).
             SupportsAllDrives(true).
             Do()
         return f.shouldRetry(err)
     })
+    return info, err
+}
+
+// getRootID returns the canonical ID for the "root" ID
+func (f *Fs) getRootID() (string, error) {
+    info, err := f.getFile("root", "id")
     if err != nil {
         return "", errors.Wrap(err, "couldn't find root directory ID")
     }
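The getFile refactor above keeps the established call pattern: every API call goes through the pacer, which asks shouldRetry whether the returned error is worth another attempt. A stand-alone sketch of that loop follows; the pacer here is a much simplified stand-in for rclone's real one (no sleeps, no rate limiting).

package main

import (
    "errors"
    "fmt"
)

var errRateLimited = errors.New("rateLimitExceeded")

// call keeps invoking fn while it reports the error as retryable.
func call(fn func() (retry bool, err error)) error {
    for {
        retry, err := fn()
        if !retry {
            return err
        }
        fmt.Println("retrying after:", err)
    }
}

func main() {
    attempts := 0
    var info string
    err := call(func() (bool, error) {
        attempts++
        if attempts < 3 {
            return true, errRateLimited // shouldRetry would say retry
        }
        info = "drive.File for id=root"
        return false, nil
    })
    fmt.Println(info, err)
}

Generalising getRootID into getFile(ID, fields) is what lets resolveShortcut, later in this diff, fetch an arbitrary shortcut target with the full field set.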
@@ -718,7 +742,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
         query = append(query, titleQuery.String())
     }
     if directoriesOnly {
-        query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
+        query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
     }
     if filesOnly {
         query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
@@ -742,22 +766,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
         list.Spaces("appDataFolder")
     }

-    var fields = partialFields
-
-    if f.opt.AuthOwnerOnly {
-        fields += ",owners"
-    }
-    if f.opt.UseSharedDate {
-        fields += ",sharedWithMeTime"
-    }
-    if f.opt.SkipChecksumGphotos {
-        fields += ",spaces"
-    }
-    if f.opt.SizeAsQuota {
-        fields += ",quotaBytesUsed"
-    }
-
-    fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
+    fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

 OUTER:
     for {
@@ -774,6 +783,24 @@ OUTER:
         }
         for _, item := range files.Files {
             item.Name = f.opt.Enc.ToStandardName(item.Name)
+            if isShortcut(item) {
+                // ignore shortcuts if directed
+                if f.opt.SkipShortcuts {
+                    continue
+                }
+                // skip file shortcuts if directory only
+                if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
+                    continue
+                }
+                // skip directory shortcuts if file only
+                if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
+                    continue
+                }
+                item, err = f.resolveShortcut(item)
+                if err != nil {
+                    return false, errors.Wrap(err, "list")
+                }
+            }
             // Check the case of items is correct since
             // the `=` operator is case insensitive.
             if title != "" && title != item.Name {
@@ -1056,8 +1083,10 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
         root:  root,
         opt:   *opt,
         pacer: newPacer(opt),
+        m:     m,
     }
     f.isTeamDrive = opt.TeamDriveID != ""
+    f.fileFields = f.getFileFields()
     f.features = (&fs.Features{
         DuplicateFiles: true,
         ReadMimeType:   true,
@@ -1169,9 +1198,28 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
         modifiedDate: modifiedDate,
         mimeType:     info.MimeType,
         bytes:        size,
+        parents:      len(info.Parents),
     }
 }

+// getFileFields gets the fields for a normal file Get or List
+func (f *Fs) getFileFields() (fields googleapi.Field) {
+    fields = partialFields
+    if f.opt.AuthOwnerOnly {
+        fields += ",owners"
+    }
+    if f.opt.UseSharedDate {
+        fields += ",sharedWithMeTime"
+    }
+    if f.opt.SkipChecksumGphotos {
+        fields += ",spaces"
+    }
+    if f.opt.SizeAsQuota {
+        fields += ",quotaBytesUsed"
+    }
+    return fields
+}
+
 // newRegularObject creates a fs.Object for a normal drive.File
 func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
     // wipe checksum if SkipChecksumGphotos and file is type Photo or Video
@@ -1185,7 +1233,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
     }
     return &Object{
         baseObject: f.newBaseObject(remote, info),
-        url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
+        url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
         md5sum:     strings.ToLower(info.Md5Checksum),
         v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
     }
@@ -1197,17 +1245,18 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
     if err != nil {
         return nil, err
     }
-    url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
+    id := actualID(info.Id)
+    url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
     if f.opt.AlternateExport {
         switch info.MimeType {
         case "application/vnd.google-apps.drawing":
-            url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
+            url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
         case "application/vnd.google-apps.document":
-            url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
+            url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
         case "application/vnd.google-apps.spreadsheet":
-            url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
+            url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
         case "application/vnd.google-apps.presentation":
-            url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
+            url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
         }
     }
     baseObject := f.newBaseObject(remote+extension, info)
@@ -1265,23 +1314,37 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro
 // When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
     remote string, info *drive.File,
-    extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
+    extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
+    // Note that resolveShortcut will have been called already if
+    // we are being called from a listing. However the drive.Item
+    // will have been resolved so this will do nothing.
+    info, err = f.resolveShortcut(info)
+    if err != nil {
+        return nil, errors.Wrap(err, "new object")
+    }
     switch {
     case info.MimeType == driveFolderType:
         return nil, fs.ErrorNotAFile
+    case info.MimeType == shortcutMimeType:
+        // We can only get here if f.opt.SkipShortcuts is set
+        // and not from a listing. This is unlikely.
+        fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
+        return nil, fs.ErrorObjectNotFound
     case info.Md5Checksum != "" || info.Size > 0:
         // If item has MD5 sum or a length it is a file stored on drive
         return f.newRegularObject(remote, info), nil
     case f.opt.SkipGdocs:
         fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
-        return nil, nil
+        return nil, fs.ErrorObjectNotFound
     default:
         // If item MimeType is in the ExportFormats then it is a google doc
         if !isDocument {
             fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
-            return nil, nil
+            return nil, fs.ErrorObjectNotFound
         }
         if extension == "" {
             fs.Debugf(remote, "No export formats found for %q", info.MimeType)
-            return nil, nil
+            return nil, fs.ErrorObjectNotFound
         }
         if isLinkMimeType(exportMimeType) {
             return f.newLinkObject(remote, info, extension, exportMimeType)
@@ -1313,6 +1376,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
     // Find the leaf in pathID
+    pathID = actualID(pathID)
     found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
         if !f.opt.SkipGdocs {
             _, exportName, _, isDocument := f.findExportFormat(item)
@@ -1506,6 +1570,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     if err != nil {
         return nil, err
     }
+    directoryID = actualID(directoryID)

     var iErr error
     _, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
@@ -1684,6 +1749,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     if err != nil {
         return err
     }
+    directoryID = actualID(directoryID)

     mu := sync.Mutex{} // protects in and overflow
     wg := sync.WaitGroup{}
@@ -1697,11 +1763,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         mu.Lock()
         defer mu.Unlock()
         if d, isDir := entry.(*fs.Dir); isDir && in != nil {
+            job := listREntry{actualID(d.ID()), d.Remote()}
             select {
-            case in <- listREntry{d.ID(), d.Remote()}:
+            case in <- job:
                 wg.Add(1)
             default:
-                overflow = append(overflow, listREntry{d.ID(), d.Remote()})
+                overflow = append(overflow, job)
             }
         }
         listed++
@@ -1778,10 +1845,87 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     return nil
 }

+const shortcutSeparator = '\t'
+
+// joinID adds an actual drive ID to the shortcut ID it came from
+//
+// directoryIDs in the dircache are these composite directory IDs so
+// we must always unpack them before use.
+func joinID(actual, shortcut string) string {
+    return actual + string(shortcutSeparator) + shortcut
+}
+
+// splitID separates an actual ID and a shortcut ID from a composite
+// ID. If there was no shortcut ID then it will return "" for it.
+func splitID(compositeID string) (actualID, shortcutID string) {
+    i := strings.IndexRune(compositeID, shortcutSeparator)
+    if i < 0 {
+        return compositeID, ""
+    }
+    return compositeID[:i], compositeID[i+1:]
+}
+
+// isShortcutID returns true if compositeID refers to a shortcut
+func isShortcutID(compositeID string) bool {
+    return strings.IndexRune(compositeID, shortcutSeparator) >= 0
+}
+
+// actualID returns an actual ID from a composite ID
+func actualID(compositeID string) (actualID string) {
+    actualID, _ = splitID(compositeID)
+    return actualID
+}
+
+// shortcutID returns a shortcut ID from a composite ID if available,
+// or the actual ID if not.
+func shortcutID(compositeID string) (shortcutID string) {
+    actualID, shortcutID := splitID(compositeID)
+    if shortcutID != "" {
+        return shortcutID
+    }
+    return actualID
+}
+
+// isShortcut returns true of the item is a shortcut
+func isShortcut(item *drive.File) bool {
+    return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
+}
+
+// Dereference shortcut if required. It returns the newItem (which may
+// be just item).
+//
+// If we return a new item then the ID will be adjusted to be a
+// composite of the actual ID and the shortcut ID. This is to make
+// sure that we have decided in all use places what we are doing with
+// the ID.
+//
+// Note that we assume shortcuts can't point to shortcuts. Google
+// drive web interface doesn't offer the option to create a shortcut
+// to a shortcut. The documentation is silent on the issue.
+func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
+    if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
+        return item, nil
+    }
+    if item.ShortcutDetails == nil {
+        fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
+        return item, nil
+    }
+    newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
+    if err != nil {
+        return nil, errors.Wrap(err, "failed to resolve shortcut")
+    }
+    // make sure we use the Name and Parents from the original item
+    newItem.Name = item.Name
+    newItem.Parents = item.Parents
+    // the new ID is a composite ID
+    newItem.Id = joinID(newItem.Id, item.Id)
+    return newItem, nil
+}
+
 // itemToDirEntry converts a drive.File to a fs.DirEntry.
 // When the drive.File cannot be represented as a fs.DirEntry
 // (nil, nil) is returned.
-func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
     switch {
     case item.MimeType == driveFolderType:
         // cache the directory ID for later lookups
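The composite-ID helpers above are the heart of the shortcut support: after resolveShortcut, an entry's ID carries both the target file's ID and the shortcut's own ID, so every call site can pick the right one (download or export the target, but rename, move, or delete the shortcut). A stand-alone round-trip sketch mirroring joinID/splitID from the diff; the tab separator is presumably safe because Drive IDs don't contain whitespace.

package main

import (
    "fmt"
    "strings"
)

const shortcutSeparator = '\t'

func joinID(actual, shortcut string) string {
    return actual + string(shortcutSeparator) + shortcut
}

func splitID(compositeID string) (actual, shortcut string) {
    i := strings.IndexRune(compositeID, shortcutSeparator)
    if i < 0 {
        return compositeID, ""
    }
    return compositeID[:i], compositeID[i+1:]
}

func main() {
    // a shortcut "shortID" pointing at the real file "fileID"
    composite := joinID("fileID", "shortID")

    actual, shortcut := splitID(composite)
    fmt.Println(actual)   // fileID  - use for downloads, exports, updates
    fmt.Println(shortcut) // shortID - use for rename, move, delete

    // a plain (non-shortcut) ID splits into itself
    actual, shortcut = splitID("plainID")
    fmt.Printf("%q %q\n", actual, shortcut) // "plainID" ""
}

This explains the many small actualID()/shortcutID() call-site changes scattered through the rest of this diff: each one is a decision about which half of the composite a given API call should see.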
@@ -1792,7 +1936,11 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error
     case f.opt.AuthOwnerOnly && !isAuthOwned(item):
         // ignore object
     default:
-        return f.newObjectWithInfo(remote, item)
+        entry, err = f.newObjectWithInfo(remote, item)
+        if err == fs.ErrorObjectNotFound {
+            return nil, nil
+        }
+        return entry, err
     }
     return nil, nil
 }
@@ -1805,6 +1953,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
     if err != nil {
         return nil, err
     }
+    directoryID = actualID(directoryID)

     leaf = f.opt.Enc.FromStandardName(leaf)
     // Define the metadata for the file we are going to create.
@@ -1908,6 +2057,18 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 // MergeDirs merges the contents of all the directories passed
 // in into the first one and rmdirs the other directories.
 func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
     if len(dirs) < 2 {
         return nil
     }
+    newDirs := dirs[:0]
+    for _, dir := range dirs {
+        if isShortcutID(dir.ID()) {
+            fs.Infof(dir, "skipping shortcut directory")
+            continue
+        }
+        newDirs = append(newDirs, dir)
+    }
+    dirs = newDirs
+    if len(dirs) < 2 {
+        return nil
+    }
@@ -1941,7 +2102,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
         }
         // rmdir (into trash) the now empty source directory
         fs.Infof(srcDir, "removing empty directory")
-        err = f.rmdir(ctx, srcDir.ID(), true)
+        err = f.delete(ctx, srcDir.ID(), true)
         if err != nil {
             return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
         }
@@ -1961,20 +2122,20 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return err
 }

-// Rmdir deletes a directory unconditionally by ID
-func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
+// delete a file or directory unconditionally by ID
+func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
     return f.pacer.Call(func() (bool, error) {
         var err error
         if useTrash {
             info := drive.File{
                 Trashed: true,
             }
-            _, err = f.svc.Files.Update(directoryID, &info).
+            _, err = f.svc.Files.Update(id, &info).
                 Fields("").
                 SupportsAllDrives(true).
                 Do()
         } else {
-            err = f.svc.Files.Delete(directoryID).
+            err = f.svc.Files.Delete(id).
                 Fields("").
                 SupportsAllDrives(true).
                 Do()
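The new delete helper centralises the two removal modes that Purge, Rmdir, MergeDirs, and Remove previously each spelled out inline: trashing is an Update of the Trashed flag, while a real delete is Files.Delete. A toy sketch of the distinction; the store below is a dummy standing in for the Drive service, and use_trash is the config option that picks the mode.

package main

import "fmt"

type file struct {
    id      string
    trashed bool
}

type store map[string]*file

// remove trashes or hard-deletes a file by ID, as the helper above does.
func (s store) remove(id string, useTrash bool) {
    if useTrash {
        s[id].trashed = true // recoverable from the Drive trash
        return
    }
    delete(s, id) // gone for good
}

func main() {
    s := store{"abc": {id: "abc"}, "def": {id: "def"}}
    s.remove("abc", true)  // use_trash (the default)
    s.remove("def", false) // use_trash=false
    fmt.Println(s["abc"].trashed) // true
    _, ok := s["def"]
    fmt.Println(ok) // false
}

Renaming rmdir to delete also signals the widened contract: it now removes files and shortcuts by ID as well as directories.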
@@ -1993,6 +2154,11 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     if err != nil {
         return err
     }
+    directoryID, shortcutID := splitID(directoryID)
+    // if directory is a shortcut remove it regardless
+    if shortcutID != "" {
+        return f.delete(ctx, shortcutID, f.opt.UseTrash)
+    }
     var trashedFiles = false
     found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
         if !item.Trashed {
@@ -2013,7 +2179,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     // trash the directory if it had trashed files
     // in or the user wants to trash, otherwise
     // delete it.
-    err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
+    err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
     if err != nil {
         return err
     }
@@ -2042,11 +2208,13 @@ func (f *Fs) Precision() time.Duration {
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     var srcObj *baseObject
     ext := ""
+    readDescription := false
     switch src := src.(type) {
     case *Object:
         srcObj = &src.baseObject
     case *documentObject:
         srcObj, ext = &src.baseObject, src.ext()
+        readDescription = true
     case *linkObject:
         srcObj, ext = &src.baseObject, src.ext()
     default:
@@ -2070,9 +2238,28 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         return nil, err
     }

+    if readDescription {
+        // preserve the description on copy for docs
+        info, err := f.getFile(actualID(srcObj.id), "description")
+        if err != nil {
+            return nil, errors.Wrap(err, "failed to read description for Google Doc")
+        }
+        // set the description if there is one, or use the default if not
+        if info.Description != "" {
+            createInfo.Description = info.Description
+        }
+    } else {
+        // don't overwrite the description on copy for files
+        // this should work for docs but it doesn't - it is probably a bug in Google Drive
+        createInfo.Description = ""
+    }
+
+    // get the ID of the thing to copy - this is the shortcut if available
+    id := shortcutID(srcObj.id)
+
     var info *drive.File
     err = f.pacer.Call(func() (bool, error) {
-        info, err = f.svc.Files.Copy(srcObj.id, createInfo).
+        info, err = f.svc.Files.Copy(id, createInfo).
             Fields(partialFields).
             SupportsAllDrives(true).
             KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2111,23 +2298,7 @@ func (f *Fs) Purge(ctx context.Context) error {
     if err != nil {
         return err
     }
-    err = f.pacer.Call(func() (bool, error) {
-        if f.opt.UseTrash {
-            info := drive.File{
-                Trashed: true,
-            }
-            _, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
-                Fields("").
-                SupportsAllDrives(true).
-                Do()
-        } else {
-            err = f.svc.Files.Delete(f.dirCache.RootID()).
-                Fields("").
-                SupportsAllDrives(true).
-                Do()
-        }
-        return f.shouldRetry(err)
-    })
+    err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
     f.dirCache.ResetRoot()
     if err != nil {
         return err
@@ -2233,6 +2404,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     if err != nil {
         return nil, err
     }
+    srcParentID = actualID(srcParentID)

     // Temporary Object under construction
     dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
@@ -2245,7 +2417,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     // Do the move
     var info *drive.File
     err = f.pacer.Call(func() (bool, error) {
-        info, err = f.svc.Files.Update(srcObj.id, dstInfo).
+        info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
             RemoveParents(srcParentID).
             AddParents(dstParents).
             Fields(partialFields).
@@ -2265,13 +2437,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
     id, err := f.dirCache.FindDir(ctx, remote, false)
     if err == nil {
         fs.Debugf(f, "attempting to share directory '%s'", remote)
+        id = shortcutID(id)
     } else {
         fs.Debugf(f, "attempting to share single file '%s'", remote)
         o, err := f.NewObject(ctx, remote)
         if err != nil {
             return "", err
         }
-        id = o.(fs.IDer).ID()
+        id = shortcutID(o.(fs.IDer).ID())
     }

     permission := &drive.Permission{
@@ -2346,6 +2519,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     if err != nil {
         return err
     }
+    dstDirectoryID = actualID(dstDirectoryID)

     // Check destination does not exist
     if dstRemote != "" {
@@ -2369,19 +2543,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     if err != nil {
         return err
     }
+    srcDirectoryID = actualID(srcDirectoryID)

     // Find ID of src
     srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
     if err != nil {
         return err
     }

     // Do the move
     patch := drive.File{
         Name: leaf,
     }
     err = f.pacer.Call(func() (bool, error) {
-        _, err = f.svc.Files.Update(srcID, &patch).
+        _, err = f.svc.Files.Update(shortcutID(srcID), &patch).
             RemoveParents(srcDirectoryID).
             AddParents(dstDirectoryID).
             Fields("").
@@ -2558,6 +2732,258 @@ func (f *Fs) Hashes() hash.Set {
     return hash.Set(hash.MD5)
 }

+func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
+    chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
+    if err != nil {
+        return errors.Wrap(err, "couldn't convert chunk size to int")
+    }
+    chunkSize := fs.SizeSuffix(chunkSizeInt)
+    if chunkSize == f.opt.ChunkSize {
+        return nil
+    }
+    err = checkUploadChunkSize(chunkSize)
+    if err == nil {
+        f.opt.ChunkSize = chunkSize
+    }
+    return err
+}
+
+func (f *Fs) changeServiceAccountFile(file string) (err error) {
+    fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
+    if file == f.opt.ServiceAccountFile {
+        return nil
+    }
+    oldSvc := f.svc
+    oldv2Svc := f.v2Svc
+    oldOAuthClient := f.client
+    oldFile := f.opt.ServiceAccountFile
+    oldCredentials := f.opt.ServiceAccountCredentials
+    defer func() {
+        // Undo all the changes instead of doing selective undo's
+        if err != nil {
+            f.svc = oldSvc
+            f.v2Svc = oldv2Svc
+            f.client = oldOAuthClient
+            f.opt.ServiceAccountFile = oldFile
+            f.opt.ServiceAccountCredentials = oldCredentials
+        }
+    }()
+    f.opt.ServiceAccountFile = file
+    f.opt.ServiceAccountCredentials = ""
+    oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
+    if err != nil {
+        return errors.Wrap(err, "drive: failed when making oauth client")
+    }
+    f.client = oAuthClient
+    f.svc, err = drive.New(f.client)
+    if err != nil {
+        return errors.Wrap(err, "couldn't create Drive client")
+    }
+    if f.opt.V2DownloadMinSize >= 0 {
+        f.v2Svc, err = drive_v2.New(f.client)
+        if err != nil {
+            return errors.Wrap(err, "couldn't create Drive v2 client")
+        }
+    }
+    return nil
+}
+
+// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
+//
+// Will not overwrite existing files
+func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
+    srcFs := f
+    srcPath = strings.Trim(srcPath, "/")
+    dstPath = strings.Trim(dstPath, "/")
+    if dstPath == "" {
+        return nil, errors.New("shortcut destination can't be root directory")
+    }
+
+    // Find source
+    var srcID string
+    isDir := false
+    if srcPath == "" {
+        // source is root directory
+        err = f.dirCache.FindRoot(ctx, false)
+        if err != nil {
+            return nil, err
+        }
+        srcID = f.dirCache.RootID()
+        isDir = true
+    } else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
+        if err != fs.ErrorNotAFile {
+            return nil, errors.Wrap(err, "can't find source")
+        }
+        // source was a directory
+        srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
+        if err != nil {
+            return nil, errors.Wrap(err, "failed to find source dir")
+        }
+        isDir = true
+    } else {
+        // source was a file
+        srcID = srcObj.(*Object).id
+    }
+    srcID = actualID(srcID) // link to underlying object not to shortcut
+
+    // Find destination
+    _, err = dstFs.NewObject(ctx, dstPath)
+    if err != fs.ErrorObjectNotFound {
+        if err == nil {
+            err = errors.New("existing file")
+        } else if err == fs.ErrorNotAFile {
+            err = errors.New("existing directory")
+        }
+        return nil, errors.Wrap(err, "not overwriting shortcut target")
+    }
+
+    // Create destination shortcut
+    createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
+    if err != nil {
+        return nil, errors.Wrap(err, "shortcut destination failed")
+    }
+    createInfo.MimeType = shortcutMimeType
+    createInfo.ShortcutDetails = &drive.FileShortcutDetails{
+        TargetId: srcID,
+    }
+
+    var info *drive.File
+    err = dstFs.pacer.CallNoRetry(func() (bool, error) {
+        info, err = dstFs.svc.Files.Create(createInfo).
+            Fields(partialFields).
+            SupportsAllDrives(true).
+            KeepRevisionForever(dstFs.opt.KeepRevisionForever).
+            Do()
+        return dstFs.shouldRetry(err)
+    })
+    if err != nil {
+        return nil, errors.Wrap(err, "shortcut creation failed")
+    }
+    if isDir {
+        return nil, nil
+    }
+    return dstFs.newObjectWithInfo(dstPath, info)
+}
+
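makeShortcut resolves its source with an error-driven probe: try NewObject first, and treat fs.ErrorNotAFile as "this path is a directory, look it up in the dircache instead". A simplified, self-contained sketch of that pattern; the sentinel error and the lookups below are stand-ins for the real fs.ErrorNotAFile and dircache calls.

package main

import (
    "errors"
    "fmt"
)

var errNotAFile = errors.New("is a directory, not a file")

// newObject stands in for Fs.NewObject: found file, directory, or missing.
func newObject(path string) (id string, err error) {
    switch path {
    case "file.txt":
        return "fileID", nil
    case "dir":
        return "", errNotAFile
    default:
        return "", errors.New("object not found")
    }
}

// resolve returns the ID to point the shortcut at, and whether it is a directory.
func resolve(path string) (id string, isDir bool, err error) {
    id, err = newObject(path)
    if err == nil {
        return id, false, nil // a plain file
    }
    if !errors.Is(err, errNotAFile) {
        return "", false, fmt.Errorf("can't find source: %w", err)
    }
    return "dirID-from-dircache", true, nil // fall back to directory lookup
}

func main() {
    fmt.Println(resolve("file.txt"))
    fmt.Println(resolve("dir"))
    fmt.Println(resolve("missing"))
}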
+var commandHelp = []fs.CommandHelp{{
+    Name:  "get",
+    Short: "Get command for fetching the drive config parameters",
+    Long: `This is a get command which will be used to fetch the various drive config parameters
+
+Usage Examples:
+
+    rclone backend get drive: [-o service_account_file] [-o chunk_size]
+    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+`,
+    Opts: map[string]string{
+        "chunk_size":           "show the current upload chunk size",
+        "service_account_file": "show the current service account file",
+    },
+}, {
+    Name:  "set",
+    Short: "Set command for updating the drive config parameters",
+    Long: `This is a set command which will be used to update the various drive config parameters
+
+Usage Examples:
+
+    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+`,
+    Opts: map[string]string{
+        "chunk_size":           "update the current upload chunk size",
+        "service_account_file": "update the current service account file",
+    },
+}, {
+    Name:  "shortcut",
+    Short: "Create shortcuts from files or directories",
+    Long: `This command creates shortcuts from files or directories.
+
+Usage:
+
+    rclone backend shortcut drive: source_item destination_shortcut
+    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+
+In the first example this creates a shortcut from the "source_item"
+which can be a file or a directory to the "destination_shortcut". The
+"source_item" and the "destination_shortcut" should be relative paths
+from "drive:"
+
+In the second example this creates a shortcut from the "source_item"
+relative to "drive:" to the "destination_shortcut" relative to
+"drive2:". This may fail with a permission error if the user
+authenticated with "drive2:" can't read files from "drive:".
+`,
+    Opts: map[string]string{
+        "target": "optional target remote for the shortcut destination",
+    },
+}}
+
+// Command the backend to run a named command
+//
+// The command run is name
+// args may be used to read arguments from
+// opts may be used to read optional arguments from
+//
+// The result should be capable of being JSON encoded
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+    switch name {
+    case "get":
+        out := make(map[string]string)
+        if _, ok := opt["service_account_file"]; ok {
+            out["service_account_file"] = f.opt.ServiceAccountFile
+        }
+        if _, ok := opt["chunk_size"]; ok {
+            out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
+        }
+        return out, nil
+    case "set":
+        out := make(map[string]map[string]string)
+        if serviceAccountFile, ok := opt["service_account_file"]; ok {
+            serviceAccountMap := make(map[string]string)
+            serviceAccountMap["previous"] = f.opt.ServiceAccountFile
+            if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
+                return out, err
+            }
+            f.m.Set("service_account_file", serviceAccountFile)
+            serviceAccountMap["current"] = f.opt.ServiceAccountFile
+            out["service_account_file"] = serviceAccountMap
+        }
+        if chunkSize, ok := opt["chunk_size"]; ok {
+            chunkSizeMap := make(map[string]string)
+            chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
+            if err = f.changeChunkSize(chunkSize); err != nil {
+                return out, err
+            }
+            chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
+            f.m.Set("chunk_size", chunkSizeString)
+            chunkSizeMap["current"] = chunkSizeString
+            out["chunk_size"] = chunkSizeMap
+        }
+        return out, nil
+    case "shortcut":
+        if len(arg) != 2 {
+            return nil, errors.New("need exactly 2 arguments")
+        }
+        dstFs := f
+        target, ok := opt["target"]
+        if ok {
+            targetFs, err := cache.Get(target)
+            if err != nil {
+                return nil, errors.Wrap(err, "couldn't find target")
+            }
+            dstFs, ok = targetFs.(*Fs)
+            if !ok {
+                return nil, errors.New("target is not a drive backend")
+            }
+        }
+        return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
+    default:
+        return nil, fs.ErrorCommandNotFound
+    }
+}
+
 // ------------------------------------------------------------

 // Fs returns the parent Fs
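One detail of the set command worth noting: changeChunkSize parses its argument with strconv.ParseInt, so chunk_size must be given as a plain byte count (67108864 in the help text above), not in a human-readable suffix form. A small sketch of that behaviour, with a plain int64 standing in for rclone's fs.SizeSuffix type:

package main

import (
    "fmt"
    "strconv"
)

// parseChunkSize mirrors the strict integer parsing in changeChunkSize.
func parseChunkSize(s string) (int64, error) {
    n, err := strconv.ParseInt(s, 10, 64)
    if err != nil {
        return 0, fmt.Errorf("couldn't convert chunk size to int: %w", err)
    }
    return n, nil
}

func main() {
    fmt.Println(parseChunkSize("67108864")) // 67108864 <nil> (64 MiB)
    fmt.Println(parseChunkSize("64M"))      // 0 and a parse error
}

Successful changes are also written back through the configmap (f.m.Set above), so they persist in the config file rather than lasting only for the current process.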
@@ -2618,8 +3044,9 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
         }
         return nil, "", "", "", false, err
     }
+    directoryID = actualID(directoryID)

-    found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
+    found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
         if !f.opt.SkipGdocs {
             extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
             if exportName == leaf {
@@ -2669,7 +3096,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
     var info *drive.File
     err := o.fs.pacer.Call(func() (bool, error) {
         var err error
-        info, err = o.fs.svc.Files.Update(o.id, updateInfo).
+        info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
             Fields(partialFields).
             SupportsAllDrives(true).
             Do()
@@ -2798,7 +3225,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     if o.v2Download {
         var v2File *drive_v2.File
         err = o.fs.pacer.Call(func() (bool, error) {
-            v2File, err = o.fs.v2Svc.Files.Get(o.id).
+            v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
                 Fields("downloadUrl").
                 SupportsAllDrives(true).
                 Do()
@@ -2877,7 +3304,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
     if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
         // Don't retry, return a retry error instead
         err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-            info, err = o.fs.svc.Files.Update(o.id, updateInfo).
+            info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
                 Media(in, googleapi.ContentType(uploadMimeType)).
                 Fields(partialFields).
                 SupportsAllDrives(true).
@@ -2897,6 +3324,26 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+    // If o is a shortcut
+    if isShortcutID(o.id) {
+        // Delete it first
+        err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
+        if err != nil {
+            return err
+        }
+        // Then put the file as a new file
+        newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
+        if err != nil {
+            return err
+        }
+        // Update the object
+        if newO, ok := newObj.(*Object); ok {
+            *o = *newO
+        } else {
+            fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
+        }
+        return nil
+    }
     srcMimeType := fs.MimeType(ctx, src)
     updateInfo := &drive.File{
         MimeType: srcMimeType,
@@ -2967,25 +3414,10 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo

 // Remove an object
 func (o *baseObject) Remove(ctx context.Context) error {
-    var err error
-    err = o.fs.pacer.Call(func() (bool, error) {
-        if o.fs.opt.UseTrash {
-            info := drive.File{
-                Trashed: true,
-            }
-            _, err = o.fs.svc.Files.Update(o.id, &info).
-                Fields("").
-                SupportsAllDrives(true).
-                Do()
-        } else {
-            err = o.fs.svc.Files.Delete(o.id).
-                Fields("").
-                SupportsAllDrives(true).
-                Do()
-        }
-        return o.fs.shouldRetry(err)
-    })
-    return err
+    if o.parents > 1 {
+        return errors.New("can't delete safely - has multiple parents")
+    }
+    return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
 }

 // MimeType of an Object if known, "" otherwise
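The rewritten Remove also adds a safety guard on the new parents count: a Drive file can sit in several folders at once, and trashing or deleting it removes it from all of them, so refusing is the safe choice. A stand-alone sketch:

package main

import (
    "errors"
    "fmt"
)

type object struct {
    id      string
    parents int
}

// remove refuses multi-parent files, as the guard above does.
func remove(o object) error {
    if o.parents > 1 {
        return errors.New("can't delete safely - has multiple parents")
    }
    fmt.Println("deleting", o.id)
    return nil
}

func main() {
    fmt.Println(remove(object{id: "a", parents: 1})) // deletes
    fmt.Println(remove(object{id: "b", parents: 2})) // refuses
}

Passing shortcutID(o.id) into delete means that when the object being removed is a shortcut, only the shortcut goes, never the file it points at.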
@@ -3047,6 +3479,7 @@ var (
     _ fs.Copier          = (*Fs)(nil)
     _ fs.Mover           = (*Fs)(nil)
     _ fs.DirMover        = (*Fs)(nil)
+    _ fs.Commander       = (*Fs)(nil)
     _ fs.DirCacheFlusher = (*Fs)(nil)
     _ fs.ChangeNotifier  = (*Fs)(nil)
     _ fs.PutUncheckeder  = (*Fs)(nil)

backend/drive/drive_internal_test.go

@@ -14,6 +14,7 @@ import (
     "github.com/pkg/errors"
     _ "github.com/rclone/rclone/backend/local"
     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fstest/fstests"
     "github.com/stretchr/testify/assert"
@@ -268,6 +269,98 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
     }
 }

+// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
+func (f *Fs) InternalTestShortcuts(t *testing.T) {
+    const (
+        // from fstest/fstests/fstests.go
+        existingDir    = "hello? sausage"
+        existingFile   = "file name.txt"
+        existingSubDir = "êé"
+    )
+    ctx := context.Background()
+    srcObj, err := f.NewObject(ctx, existingFile)
+    require.NoError(t, err)
+    srcHash, err := srcObj.Hash(ctx, hash.MD5)
+    require.NoError(t, err)
+    assert.NotEqual(t, "", srcHash)
+    t.Run("Errors", func(t *testing.T) {
+        _, err := f.makeShortcut(ctx, "", f, "")
+        assert.Error(t, err)
+        assert.Contains(t, err.Error(), "can't be root")
+
+        _, err = f.makeShortcut(ctx, "notfound", f, "dst")
+        assert.Error(t, err)
+        assert.Contains(t, err.Error(), "can't find source")
+
+        _, err = f.makeShortcut(ctx, existingFile, f, existingFile)
+        assert.Error(t, err)
+        assert.Contains(t, err.Error(), "not overwriting")
+        assert.Contains(t, err.Error(), "existing file")
+
+        _, err = f.makeShortcut(ctx, existingFile, f, existingDir)
+        assert.Error(t, err)
+        assert.Contains(t, err.Error(), "not overwriting")
+        assert.Contains(t, err.Error(), "existing directory")
+    })
+    t.Run("File", func(t *testing.T) {
+        dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
+        require.NoError(t, err)
+        require.NotNil(t, dstObj)
+        assert.Equal(t, "shortcut.txt", dstObj.Remote())
+        dstHash, err := dstObj.Hash(ctx, hash.MD5)
+        require.NoError(t, err)
+        assert.Equal(t, srcHash, dstHash)
+        require.NoError(t, dstObj.Remove(ctx))
+    })
+    t.Run("Dir", func(t *testing.T) {
+        dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
+        require.NoError(t, err)
+        require.Nil(t, dstObj)
+        entries, err := f.List(ctx, "shortcutdir")
+        require.NoError(t, err)
+        require.Equal(t, 1, len(entries))
+        require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
+        require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
+    })
+    t.Run("Command", func(t *testing.T) {
+        _, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
+        require.Error(t, err)
+        require.Contains(t, err.Error(), "need exactly 2 arguments")
+
+        _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
+            "target": "doesnotexistremote:",
+        })
+        require.Error(t, err)
+        require.Contains(t, err.Error(), "couldn't find target")
+
+        _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
+            "target": ".",
+        })
+        require.Error(t, err)
+        require.Contains(t, err.Error(), "target is not a drive backend")
+
+        dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
+            "target": fs.ConfigString(f),
+        })
+        require.NoError(t, err)
+        dstObj := dstObjI.(*Object)
+        assert.Equal(t, "shortcut2.txt", dstObj.Remote())
+        dstHash, err := dstObj.Hash(ctx, hash.MD5)
+        require.NoError(t, err)
+        assert.Equal(t, srcHash, dstHash)
+        require.NoError(t, dstObj.Remove(ctx))
+
+        dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
+        require.NoError(t, err)
+        dstObj = dstObjI.(*Object)
+        assert.Equal(t, "shortcut3.txt", dstObj.Remote())
+        dstHash, err = dstObj.Hash(ctx, hash.MD5)
+        require.NoError(t, err)
+        assert.Equal(t, srcHash, dstHash)
|
||||
require.NoError(t, dstObj.Remove(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// These tests all depend on each other so run them as nested tests
|
||||
t.Run("DocumentImport", func(t *testing.T) {
|
||||
@@ -282,6 +375,7 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
})
|
||||
})
|
||||
})
|
||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
6
backend/dropbox/dropbox.go
Normal file → Executable file
@@ -225,7 +225,11 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// handle any official Retry-After header from Dropbox's SDK first
// First check for Insufficient Space
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
@@ -17,6 +17,7 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -320,7 +321,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}

func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)

fileName = f.opt.Enc.FromStandardName(fileName)
@@ -338,6 +339,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{

@@ -23,11 +23,12 @@ import (
)

const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
)

func init() {
@@ -185,7 +186,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}

@@ -338,7 +339,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
if err != nil {
return nil, err
}
@@ -242,6 +242,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
@@ -555,7 +558,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1063,6 +1066,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
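A minimal sketch (not part of the diff) of the caller side of the switch above, assuming rclone's fs.HTTPOption type, whose Header() method yields the key/value pair that the GCS Update maps onto the storage.Object:

```go
package main

import "github.com/rclone/rclone/fs"

func main() {
	// Headers that the switch in Update would map onto the object:
	options := []fs.OpenOption{
		&fs.HTTPOption{Key: "Cache-Control", Value: "max-age=3600"}, // -> object.CacheControl
		&fs.HTTPOption{Key: "X-Goog-Meta-Owner", Value: "team-a"},   // -> object.Metadata["owner"]
	}
	for _, o := range options {
		k, v := o.Header()
		println(k, v)
	}
}
```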
@@ -134,14 +134,20 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
}

// Fs represents a remote storage server
@@ -202,6 +208,11 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}

// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -224,6 +235,10 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -943,8 +958,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Method: "POST",
Path: "/uploads",
Options: options,
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",

@@ -23,6 +23,7 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}

// dirPattern describes a single directory pattern
@@ -222,11 +223,10 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}

// Return the years from 2000 to today
// FIXME make configurable?
// Return the years from startYear to today
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
for year := f.startYear(); year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil

@@ -59,6 +59,11 @@ func (f *testLister) dirTime() time.Time {
return startTime
}

// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}

func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
@@ -166,8 +166,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()

dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)

// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
@@ -164,6 +164,12 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}

// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}

// XML structures returned by the old API

// Flag is a hacky type for checking if an attribute is present
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "trashed_only",
|
||||
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
@@ -174,6 +179,7 @@ type Options struct {
|
||||
Device string `config:"device"`
|
||||
Mountpoint string `config:"mountpoint"`
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Unlink bool `config:"unlink"`
|
||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||
@@ -519,6 +525,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
WriteMimeType: true,
|
||||
}).Fill(f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
|
||||
f.features.ListR = nil
|
||||
}
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
@@ -638,13 +647,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
|
||||
if result.Deleted {
|
||||
if bool(result.Deleted) && !f.opt.TrashedOnly {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
for i := range result.Folders {
|
||||
item := &result.Folders[i]
|
||||
if item.Deleted {
|
||||
if !f.opt.TrashedOnly && bool(item.Deleted) {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
@@ -654,8 +663,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
|
||||
for i := range result.Files {
|
||||
item := &result.Files[i]
|
||||
if item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
if f.opt.TrashedOnly {
|
||||
if !item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
}
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
|
||||
o, err := f.newObjectWithInfo(ctx, remote, item)
|
||||
@@ -1049,6 +1064,22 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// CleanUp empties the trash
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "files/v1/purge_trash",
|
||||
}
|
||||
|
||||
var info api.TrashResponse
|
||||
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "couldn't empty trash")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
@@ -1122,7 +1153,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Deleted {
|
||||
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
@@ -1259,6 +1290,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "files/v1/allocate",
|
||||
Options: options,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(src.ModTime(ctx)).APIString()
|
||||
@@ -1355,6 +1387,7 @@ var (
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
||||
|
||||
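A minimal sketch (not part of the diff) of how the new fs.CleanUpper interface is reached: rclone exposes a backend's CleanUp through its Features, so a caller can empty the trash generically. Assumes rclone's Features().CleanUp accessor:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// cleanUp empties the trash on any backend that advertises
// fs.CleanUpper, as jottacloud now does via purge_trash.
func cleanUp(ctx context.Context, f fs.Fs) error {
	do := f.Features().CleanUp
	if do == nil {
		return fmt.Errorf("%v doesn't support cleanup", f)
	}
	return do(ctx)
}

func main() {}
```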
@@ -41,6 +41,7 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -697,6 +698,50 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}

var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
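A sketch of invoking the new noop command through the fs.Commander interface asserted at the bottom of this file. This is illustrative only: the fs.NewFs signature shown assumes the API at the time of this diff (later rclone versions added a context argument):

```go
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
)

func main() {
	f, err := fs.NewFs("/tmp") // hypothetical path
	if err != nil {
		panic(err)
	}
	if cmd, ok := f.(fs.Commander); ok {
		out, err := cmd.Command(context.Background(), "noop",
			[]string{"a", "b"}, map[string]string{"echo": "true"})
		// With "echo" set, Command returns its own inputs:
		// map[arg:[a b] name:noop opt:map[echo:true]] <nil>
		fmt.Println(out, err)
	}
}
```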
// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -723,8 +768,17 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
var changed bool
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
}

o.fs.objectHashesMu.Lock()
@@ -732,7 +786,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock()

if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
if changed || hashes == nil || !hashFound {
var in io.ReadCloser

if !o.translatedLink {
@@ -977,7 +1031,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
@@ -1064,12 +1118,12 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = preAllocate(size, out)
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Set the file to be a sparse file (important on Windows)
err = setSparse(out)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
}
@@ -1164,6 +1218,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)

@@ -1,15 +0,0 @@
//+build !windows,!linux

package local

import "os"

// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}

// setSparse makes the file be a sparse file
func setSparse(out *os.File) error {
return nil
}
77
backend/onedrive/onedrive.go
Normal file → Executable file
@@ -184,6 +184,28 @@ func init() {
log.Fatalf("Failed to query available drives: %v", err)
}

// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}

if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
@@ -226,8 +248,9 @@ func init() {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -251,6 +274,16 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.

This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -303,11 +336,12 @@ listing, set this option.`,

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote one drive
@@ -402,6 +436,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
@@ -576,6 +612,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)

@@ -988,10 +1025,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
}

// Create temporary object
@@ -1572,7 +1612,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
}

// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
@@ -1585,6 +1625,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
Options: options,
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
@@ -1642,7 +1683,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
@@ -1693,7 +1734,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
if err != nil {
return nil, err
}
@@ -1706,7 +1747,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,

// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
@@ -1723,6 +1764,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
@@ -1730,6 +1772,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}

@@ -1771,9 +1814,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime)
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime)
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
} else {
return errors.New("unknown-sized upload not supported")
}
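The chunk_size help above says fragments must be a multiple of 320k (327,680 bytes). A minimal sketch (not part of the diff) of rounding a desired chunk size down to a valid value:

```go
package main

import "fmt"

func main() {
	const quantum = 320 * 1024 // OneDrive fragments must be a multiple of 320 KiB
	want := 5 * 1000 * 1000    // hypothetical desired ~5 MB chunk
	chunk := (want / quantum) * quantum
	fmt.Println(chunk, chunk%quantum == 0) // 4915200 true
}
```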
@@ -687,8 +687,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
Method: "POST",
Options: options,
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
@@ -970,8 +971,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
// fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{
Method: "POST",
Path: "/upload/open_file_upload.json",
Method: "POST",
Options: options,
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)
@@ -41,8 +41,7 @@ const (
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "d0" // ID of root folder is always this
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.pcloud.com"
)

@@ -89,13 +88,19 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
}

// Fs represents a remote pcloud
@@ -265,7 +270,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})

// Get rootID
// Get rootFolderID
rootID := f.opt.RootFolderID
f.dirCache = dircache.New(root, rootID, f)

// Find the current root
@@ -1074,6 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Options: options,
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)
@@ -517,7 +517,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
@@ -1002,6 +1002,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "/folder/uploadinfo",
Parameters: o.fs.baseParams(),
Options: options,
MultipartParams: url.Values{
"id": {directoryID},
},
@@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -34,7 +35,8 @@ type Fs struct {
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
oAuthClient *http.Client
httpClient *http.Client // base http client
oAuthClient *http.Client // http client with oauth Authorization
}

// ------------------------------------------------------------
@@ -59,6 +61,12 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// parsePath parses a putio 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -68,7 +76,9 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
if err != nil {
return nil, err
}
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
root = parsePath(root)
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -78,6 +88,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
@@ -253,7 +264,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options)
if err != nil {
return nil, err
}
@@ -273,7 +284,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObjectWithInfo(ctx, remote, entry)
}

func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
@@ -288,6 +299,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
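createUpload now forwards open options straight onto the raw http.Request with fs.OpenOptionAddHTTPHeaders. A sketch of what that helper amounts to (illustrative, not the library source):

```go
package main

import (
	"net/http"

	"github.com/rclone/rclone/fs"
)

// addHeaders mirrors, as a sketch, what fs.OpenOptionAddHTTPHeaders does:
// copy every option that carries an HTTP header onto the outgoing request.
func addHeaders(header http.Header, options []fs.OpenOption) {
	for _, option := range options {
		key, value := option.Header()
		if key != "" && value != "" {
			header.Add(key, value)
		}
	}
}

func main() {
	h := make(http.Header)
	addHeaders(h, []fs.OpenOption{&fs.HTTPOption{Key: "X-Trace", Value: "1"}})
	println(h.Get("X-Trace"))
}
```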
@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = http.DefaultClient.Do(req)
resp, err = o.fs.httpClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
@@ -864,6 +864,76 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}

// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
return errors.Wrap(err, "clean up bucket list multipart uploads")
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
age := time.Since(*upload.Created)
if age > 24*time.Hour {
fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
req := qs.AbortMultipartUploadInput{
UploadID: upload.UploadID,
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
fs.Errorf(f, "%v", err)
}
} else {
fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
}
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
break
} else {
marker = resp.NextKeyMarker
}
}
return err
}

// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
if f.rootBucket != "" {
return f.cleanUpBucket(ctx, f.rootBucket)
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if err != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}
}
return err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -1090,9 +1160,10 @@ func (o *Object) MimeType(ctx context.Context) string {

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
_ fs.Fs = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)

@@ -341,12 +341,17 @@ func (mu *multiUploader) abort() error {
}

// multiPartUpload upload a multiple object into QingStor
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var err error
//Initiate an multi-part upload
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
// Initiate an multi-part upload
if err = mu.initiate(); err != nil {
return err
}
defer func() {
// Abort the transfer if returning an error
if err != nil {
_ = mu.abort()
}
}()

ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
@@ -400,9 +405,5 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
close(ch)
mu.wg.Wait()
// Complete Multipart Upload
err = mu.complete()
if mu.getErr() != nil || err != nil {
_ = mu.abort()
}
return err
return mu.complete()
}
169
backend/s3/s3.go
@@ -641,7 +641,7 @@ isn't set then "acl" is used instead.`,
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS",
Provider: "AWS,Ceph,Minio",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -652,10 +652,22 @@ isn't set then "acl" is used instead.`,
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS",
Provider: "AWS,Ceph,Minio",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -663,6 +675,24 @@ isn't set then "acl" is used instead.`,
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_md5",
Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
@@ -754,8 +784,13 @@ The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
@@ -889,6 +924,9 @@ type Options struct {
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
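For the new sse_customer_key_md5 option, AWS documents the SSE-C key MD5 header as the base64 encoding of the raw MD5 digest of the key bytes. A minimal sketch (with a hypothetical key) of computing that value:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical 32-byte SSE-C key; the MD5 checksum S3 expects is the
	// base64 encoding of the raw (not hex) MD5 digest of the key bytes.
	key := []byte("32-bytes-long-secret-key-example")
	sum := md5.Sum(key)
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```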
@@ -908,19 +946,18 @@ type Options struct {

// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
poolMu sync.Mutex // mutex protecting memory pools map
pools map[int64]*pool.Pool // memory pools
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
pool *pool.Pool // memory pool
}

// Object describes a s3 object
@@ -1099,7 +1136,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(fs.Config.LowLevelRetries).
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1206,20 +1243,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 0 because we are relying on SDK retry mechanism.
// Setting it to 1 because in context of pacer it means 1 attempt.
pc.SetRetries(1)

f := &Fs{
name: name,
opt: *opt,
c: c,
ses: ses,
pacer: pc,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
pools: make(map[int64]*pool.Pool),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}

f.setRoot(root)
@@ -1469,7 +1506,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1701,12 +1738,12 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
}
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
if awsErr, ok := err.(awserr.Error); ok {
if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
err = nil
}
}
return nil
return err
}, func() (bool, error) {
return f.bucketExists(ctx, bucket)
})
@@ -1910,19 +1947,16 @@ func (f *Fs) Hashes() hash.Set {
}

func (f *Fs) getMemoryPool(size int64) *pool.Pool {
f.poolMu.Lock()
defer f.poolMu.Unlock()

_, ok := f.pools[size]
if !ok {
f.pools[size] = pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(f.opt.ChunkSize),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return f.pools[size]

return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}

// ------------------------------------------------------------
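The Fs now keeps one long-lived pool sized for the default chunk_size and builds throwaway pools for any other size. A minimal sketch of the lib/pool API as it is used in the constructor call above (flush interval, buffer size, pool size, use mmap):

```go
package main

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	// Arguments mirror the pool.New call in the diff.
	bp := pool.New(time.Minute, 1024*1024, 4, false)
	buf := bp.Get() // borrow a 1 MiB buffer
	copy(buf, []byte("example"))
	bp.Put(buf) // return it for reuse by later uploads
}
```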
@@ -2089,22 +2123,35 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
httpReq, resp := o.fs.c.GetObjectRequest(&req)
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
case *fs.HTTPOption:
key, value := option.Header()
httpReq.HTTPRequest.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
@@ -2206,9 +2253,16 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
tokens.Get()
buf := memPool.Get()

free := func() {
// return the memory and token
memPool.Put(buf)
tokens.Put()
}

// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}

@@ -2217,10 +2271,12 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
n, err = readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 && partNum != 1 { // end if no data and if not first chunk
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]
@@ -2229,6 +2285,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
off += int64(n)
g.Go(func() (err error) {
defer free()
partLength := int64(len(buf))

// create checksum of buffer for integrity checking
@@ -2266,11 +2323,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si

return false, nil
})

// return the memory and token
memPool.Put(buf[:partSize])
tokens.Put()

if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
@@ -2356,6 +2408,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
@@ -2402,6 +2463,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
httpReq.Header = headers
httpReq.ContentLength = size

for _, option := range options {
switch option.(type) {
case *fs.HTTPOption:
key, value := option.Header()
httpReq.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}

err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
153
backend/seafile/api/types.go
Normal file
@@ -0,0 +1,153 @@
package api

// Some api objects are duplicated with only small differences;
// this is because the returned JSON objects are very inconsistent between api calls.

// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
	Token  string   `json:"token"`
	Errors []string `json:"non_field_errors"`
}

// AccountInfo contains simple user properties
type AccountInfo struct {
	Usage int64  `json:"usage"`
	Total int64  `json:"total"`
	Email string `json:"email"`
	Name  string `json:"name"`
}

// ServerInfo contains server information
type ServerInfo struct {
	Version string `json:"version"`
}

// DefaultLibrary when none specified
type DefaultLibrary struct {
	ID     string `json:"repo_id"`
	Exists bool   `json:"exists"`
}

// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
	Name        string `json:"name"`
	Description string `json:"desc"`
	Password    string `json:"passwd"`
}

// Library properties. Please note not all properties are going to be useful for rclone
type Library struct {
	Encrypted bool   `json:"encrypted"`
	Owner     string `json:"owner"`
	ID        string `json:"id"`
	Size      int    `json:"size"`
	Name      string `json:"name"`
	Modified  int64  `json:"mtime"`
}

// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
	ID   string `json:"repo_id"`
	Name string `json:"repo_name"`
}

// FileType is either "dir" or "file"
type FileType string

// File types
var (
	FileTypeDir  FileType = "dir"
	FileTypeFile FileType = "file"
)

// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
	ID       string   `json:"id"`
	Type     FileType `json:"type"`
	Name     string   `json:"name"`
	Size     int64    `json:"size"`
	Parent   string   `json:"parent_dir"`
	Modified string   `json:"last_modified"`
}

// DirEntries contains a list of DirEntry
type DirEntries struct {
	Entries []DirEntry `json:"dirent_list"`
}

// DirEntry contains a directory entry
type DirEntry struct {
	ID       string   `json:"id"`
	Type     FileType `json:"type"`
	Name     string   `json:"name"`
	Size     int64    `json:"size"`
	Path     string   `json:"parent_dir"`
	Modified int64    `json:"mtime"`
}

// Operation is move, copy or rename
type Operation string

// Operations
var (
	CopyFileOperation   Operation = "copy"
	MoveFileOperation   Operation = "move"
	RenameFileOperation Operation = "rename"
)

// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
	Operation            Operation `json:"operation"`
	DestinationLibraryID string    `json:"dst_repo"` // For copy/move operation
	DestinationPath      string    `json:"dst_dir"`  // For copy/move operation
	NewName              string    `json:"newname"`  // Only to be used by the rename operation
}

// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
	Type      string `json:"type"`
	LibraryID string `json:"repo_id"`
	Path      string `json:"parent_dir"`
	Name      string `json:"obj_name"`
	ID        string `json:"obj_id"`
	Size      int64  `json:"size"`
}

// CreateDirRequest only contains an operation field
type CreateDirRequest struct {
	Operation string `json:"operation"`
}

// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
	ID   string `json:"repo_id"`
	Name string `json:"name"`
	Path string `json:"path"`
}

// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
	LibraryID string `json:"repo_id"`
	Path      string `json:"path"`
}

// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
	Link      string `json:"link"`
	IsExpired bool   `json:"is_expired"`
}

// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
	SrcLibraryID string   `json:"src_repo_id"`
	SrcParentDir string   `json:"src_parent_dir"`
	SrcItems     []string `json:"src_dirents"`
	DstLibraryID string   `json:"dst_repo_id"`
	DstParentDir string   `json:"dst_parent_dir"`
}
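Since Seafile's JSON field names rarely match the Go field names, the struct tags above do all the mapping. A minimal sketch of how one of these request types serializes (a local, trimmed copy of FileOperationRequest for illustration; the output assumes encoding/json's default behaviour):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the api types above, trimmed for illustration.
type Operation string

type FileOperationRequest struct {
	Operation            Operation `json:"operation"`
	DestinationLibraryID string    `json:"dst_repo"`
	DestinationPath      string    `json:"dst_dir"`
	NewName              string    `json:"newname"`
}

func main() {
	req := FileOperationRequest{
		Operation:            "copy",
		DestinationLibraryID: "repo-id",
		DestinationPath:      "/destination",
	}
	out, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"operation":"copy","dst_repo":"repo-id","dst_dir":"/destination","newname":""}
}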
127  backend/seafile/object.go  Normal file
@@ -0,0 +1,127 @@
package seafile

import (
	"context"
	"io"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a seafile object (also commonly called a file)
type Object struct {
	fs            *Fs       // what this object is part of
	id            string    // internal ID of object
	remote        string    // The remote path (full path containing library name if target at root)
	pathInLibrary string    // Path of the object without the library name
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	libraryID     string    // Needed to download the file
}

// ==================== Interface fs.DirEntry ====================

// String returns a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote string
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the last modified time
func (o *Object) ModTime(context.Context) time.Time {
	return o.modTime
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// ==================== Interface fs.ObjectInfo ====================

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file.
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// ==================== Interface fs.Object ====================

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
	if err != nil {
		return nil, err
	}
	reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
	if err != nil {
		return nil, err
	}
	return reader, nil
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The upload sometimes returns a temporary 500 error.
	// We cannot use the pacer to retry uploading the file as the upload link is single use only.
	for retry := 0; retry <= 3; retry++ {
		uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
		if err != nil {
			return err
		}

		uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
		if err == ErrorInternalDuringUpload {
			// This is a temporary error, try again with a new upload link
			continue
		}
		if err != nil {
			return err
		}
		// Set the properties from the upload back to the object
		o.size = uploaded.Size
		o.id = uploaded.ID

		return nil
	}
	return ErrorInternalDuringUpload
}

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}

// ==================== Optional Interface fs.IDer ====================

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
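The retry loop in Update is worth calling out: because a Seafile upload link is single use, a failed attempt cannot simply be re-sent, so the code fetches a fresh link each time and retries only on the temporary-500 sentinel error. A minimal sketch of that shape (getLink, upload and errTemporary are hypothetical stand-ins):

package main

import (
	"errors"
	"fmt"
)

var errTemporary = errors.New("temporary server error")

// uploadWithRetry is a generic bounded-retry pattern like the one in Update
// above: fetch a fresh single-use link on each attempt, since a failed upload
// consumes the link.
func uploadWithRetry(getLink func() (string, error), upload func(link string) error) error {
	for retry := 0; retry <= 3; retry++ {
		link, err := getLink()
		if err != nil {
			return err
		}
		if err := upload(link); errors.Is(err, errTemporary) {
			continue // temporary error: try again with a new link
		} else if err != nil {
			return err
		}
		return nil
	}
	return errTemporary
}

func main() {
	attempts := 0
	err := uploadWithRetry(
		func() (string, error) { return fmt.Sprintf("link-%d", attempts), nil },
		func(link string) error {
			attempts++
			if attempts < 3 {
				return errTemporary
			}
			return nil
		},
	)
	fmt.Println(err, attempts) // <nil> 3
}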
67  backend/seafile/pacer.go  Normal file
@@ -0,0 +1,67 @@
package seafile

import (
	"fmt"
	"net/url"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

const (
	minSleep      = 100 * time.Millisecond
	maxSleep      = 10 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

// Use only one pacer per server URL
var (
	pacers     map[string]*fs.Pacer
	pacerMutex sync.Mutex
)

func init() {
	pacers = make(map[string]*fs.Pacer)
}

// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
	pacerMutex.Lock()
	defer pacerMutex.Unlock()

	remote = parseRemote(remote)
	if existing, found := pacers[remote]; found {
		return existing
	}

	pacers[remote] = fs.NewPacer(
		pacer.NewDefault(
			pacer.MinSleep(minSleep),
			pacer.MaxSleep(maxSleep),
			pacer.DecayConstant(decayConstant),
		),
	)
	return pacers[remote]
}

// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
	remoteURL, err := url.Parse(remote)
	if err != nil {
		// Return a default value in the very unlikely event we cannot parse the remote
		fs.Infof(nil, "Cannot parse remote %s", remote)
		return "default"
	}
	host := remoteURL.Hostname()
	port := remoteURL.Port()
	if port == "" {
		if remoteURL.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return fmt.Sprintf("%s:%s", host, port)
}
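The pacer map is keyed by normalized host and port rather than by the full remote URL, so two remotes pointing at the same Seafile server share one rate limiter. A standalone sketch of the normalization (hostPort mirrors parseRemote above; the example URLs are made up):

package main

import (
	"fmt"
	"net/url"
)

// hostPort normalizes a remote URL to "host:port" so that all remotes
// pointing at the same server share one pacer, defaulting the port from
// the scheme when it is missing.
func hostPort(remote string) string {
	u, err := url.Parse(remote)
	if err != nil {
		return "default"
	}
	port := u.Port()
	if port == "" {
		if u.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return fmt.Sprintf("%s:%s", u.Hostname(), port)
}

func main() {
	fmt.Println(hostPort("https://cloud.example.com/seafile")) // cloud.example.com:443
	fmt.Println(hostPort("http://cloud.example.com:8080"))     // cloud.example.com:8080
}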
1247  backend/seafile/seafile.go  Normal file (diff suppressed because it is too large)
123  backend/seafile/seafile_internal_test.go  Normal file
@@ -0,0 +1,123 @@
package seafile

import (
	"path"
	"testing"

	"github.com/stretchr/testify/assert"
)

type pathData struct {
	configLibrary   string // Library specified in the config
	configRoot      string // Root directory specified in the config
	argumentPath    string // Path given as an argument in the command line
	expectedLibrary string
	expectedPath    string
}

// Test the method to split a library name and a path
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
	testData := []pathData{
		pathData{
			configLibrary:   "",
			configRoot:      "",
			argumentPath:    "",
			expectedLibrary: "",
			expectedPath:    "",
		},
		pathData{
			configLibrary:   "",
			configRoot:      "",
			argumentPath:    "Library",
			expectedLibrary: "Library",
			expectedPath:    "",
		},
		pathData{
			configLibrary:   "",
			configRoot:      "",
			argumentPath:    path.Join("Library", "path", "to", "file"),
			expectedLibrary: "Library",
			expectedPath:    path.Join("path", "to", "file"),
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "",
			argumentPath:    "",
			expectedLibrary: "Library",
			expectedPath:    "",
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "",
			argumentPath:    "path",
			expectedLibrary: "Library",
			expectedPath:    "path",
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "",
			argumentPath:    path.Join("path", "to", "file"),
			expectedLibrary: "Library",
			expectedPath:    path.Join("path", "to", "file"),
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "root",
			argumentPath:    "",
			expectedLibrary: "Library",
			expectedPath:    "root",
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      path.Join("root", "path"),
			argumentPath:    "",
			expectedLibrary: "Library",
			expectedPath:    path.Join("root", "path"),
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "root",
			argumentPath:    "path",
			expectedLibrary: "Library",
			expectedPath:    path.Join("root", "path"),
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      "root",
			argumentPath:    path.Join("path", "to", "file"),
			expectedLibrary: "Library",
			expectedPath:    path.Join("root", "path", "to", "file"),
		},
		pathData{
			configLibrary:   "Library",
			configRoot:      path.Join("root", "path"),
			argumentPath:    path.Join("subpath", "to", "file"),
			expectedLibrary: "Library",
			expectedPath:    path.Join("root", "path", "subpath", "to", "file"),
		},
	}
	for _, test := range testData {
		fs := &Fs{
			libraryName:   test.configLibrary,
			rootDirectory: test.configRoot,
		}
		libraryName, path := fs.splitPath(test.argumentPath)

		assert.Equal(t, test.expectedLibrary, libraryName)
		assert.Equal(t, test.expectedPath, path)
	}
}

func TestSplitPathIntoSlice(t *testing.T) {
	testData := map[string][]string{
		"1":     {"1"},
		"/1":    {"1"},
		"/1/":   {"1"},
		"1/2/3": {"1", "2", "3"},
	}
	for input, expected := range testData {
		output := splitPath(input)
		assert.Equal(t, expected, output)
	}
}
17  backend/seafile/seafile_test.go  Normal file
@@ -0,0 +1,17 @@
// Test Seafile filesystem interface
package seafile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/seafile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestSeafile:",
		NilObject:  (*seafile.Object)(nil),
	})
}
1083  backend/seafile/webapi.go  Normal file (diff suppressed because it is too large)
@@ -1102,19 +1102,18 @@ func (o *Object) stat() error {
 //
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if !o.fs.opt.SetModTime {
-		return nil
+	if o.fs.opt.SetModTime {
+		c, err := o.fs.getSftpConnection()
+		if err != nil {
+			return errors.Wrap(err, "SetModTime")
+		}
+		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
+		o.fs.putSftpConnection(&c, err)
+		if err != nil {
+			return errors.Wrap(err, "SetModTime failed")
+		}
 	}
-	c, err := o.fs.getSftpConnection()
-	if err != nil {
-		return errors.Wrap(err, "SetModTime")
-	}
-	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
-	o.fs.putSftpConnection(&c, err)
-	if err != nil {
-		return errors.Wrap(err, "SetModTime failed")
-	}
-	err = o.stat()
+	err := o.stat()
 	if err != nil {
 		return errors.Wrap(err, "SetModTime stat failed")
 	}
@@ -1429,8 +1429,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var resp *http.Response
 	var info api.UploadSpecification
 	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/Items(" + directoryID + ")/Upload2",
+		Method:  "POST",
+		Path:    "/Items(" + directoryID + ")/Upload2",
+		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)
@@ -733,7 +733,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
-		return f.PutUnchecked(ctx, in, src)
+		return f.PutUnchecked(ctx, in, src, options...)
 	default:
 		return nil, err
 	}
@@ -1320,6 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		RootURL:    o.id,
		Path:       "/data",
		NoResponse: true,
		Options:    options,
		Body:       in,
	}
	if size >= 0 {
@@ -1285,6 +1285,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	m.SetModTime(modTime)
	contentType := fs.MimeType(ctx, src)
	headers := m.ObjectHeaders()
	fs.OpenOptionAddHeaders(options, headers)
	uniquePrefix := ""
	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
684  backend/tardigrade/fs.go  Normal file
@@ -0,0 +1,684 @@
// +build go1.13,!plan9

// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade

import (
	"context"
	"fmt"
	"io"
	"log"
	"path"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/bucket"
	"golang.org/x/text/unicode/norm"

	"storj.io/uplink"
)

const (
	existingProvider = "existing"
	newProvider      = "new"
)

var satMap = map[string]string{
	"us-central-1.tardigrade.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
	"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
	"asia-east-1.tardigrade.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "tardigrade",
		Description: "Tardigrade Decentralized Cloud Storage",
		NewFs:       NewFs,
		Config: func(name string, configMapper configmap.Mapper) {
			provider, _ := configMapper.Get(fs.ConfigProvider)

			config.FileDeleteKey(name, fs.ConfigProvider)

			if provider == newProvider {
				satelliteString, _ := configMapper.Get("satellite_address")
				apiKey, _ := configMapper.Get("api_key")
				passphrase, _ := configMapper.Get("passphrase")

				// satelliteString always contains a default, and passphrase can be empty
				if apiKey == "" {
					return
				}

				satellite, found := satMap[satelliteString]
				if !found {
					satellite = satelliteString
				}

				access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
				if err != nil {
					log.Fatalf("Couldn't create access grant: %v", err)
				}

				serializedAccess, err := access.Serialize()
				if err != nil {
					log.Fatalf("Couldn't serialize access grant: %v", err)
				}
				configMapper.Set("satellite_address", satellite)
				configMapper.Set("access_grant", serializedAccess)
			} else if provider == existingProvider {
				config.FileDeleteKey(name, "satellite_address")
				config.FileDeleteKey(name, "api_key")
				config.FileDeleteKey(name, "passphrase")
			} else {
				log.Fatalf("Invalid provider type: %s", provider)
			}
		},
		Options: []fs.Option{
			{
				Name:     fs.ConfigProvider,
				Help:     "Choose an authentication method.",
				Required: true,
				Default:  existingProvider,
				Examples: []fs.OptionExample{{
					Value: "existing",
					Help:  "Use an existing access grant.",
				}, {
					Value: newProvider,
					Help:  "Create a new access grant from satellite address, API key, and passphrase.",
				},
				}},
			{
				Name:     "access_grant",
				Help:     "Access Grant.",
				Required: false,
				Provider: "existing",
			},
			{
				Name:     "satellite_address",
				Help:     "Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.",
				Required: false,
				Provider: newProvider,
				Default:  "us-central-1.tardigrade.io",
				Examples: []fs.OptionExample{{
					Value: "us-central-1.tardigrade.io",
					Help:  "US Central 1",
				}, {
					Value: "europe-west-1.tardigrade.io",
					Help:  "Europe West 1",
				}, {
					Value: "asia-east-1.tardigrade.io",
					Help:  "Asia East 1",
				},
				},
			},
			{
				Name:     "api_key",
				Help:     "API Key.",
				Required: false,
				Provider: newProvider,
			},
			{
				Name:     "passphrase",
				Help:     "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
				Required: false,
				Provider: newProvider,
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Access string `config:"access_grant"`

	SatelliteAddress string `config:"satellite_address"`
	APIKey           string `config:"api_key"`
	Passphrase       string `config:"passphrase"`
}

// Fs represents a remote to Tardigrade
type Fs struct {
	name string // the name of the remote
	root string // root of the filesystem

	opts     Options      // parsed options
	features *fs.Features // optional features

	access *uplink.Access // parsed scope

	project *uplink.Project // project client
}

// Check the interfaces are satisfied.
var (
	_ fs.Fs          = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.PutStreamer = &Fs{}
)

// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
	ctx := context.Background()

	// Setup filesystem and connection to Tardigrade
	root = norm.NFC.String(root)
	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
		root: root,
	}

	// Parse config into Options struct
	err = configstruct.Set(m, &f.opts)
	if err != nil {
		return nil, err
	}

	// Parse access
	var access *uplink.Access

	if f.opts.Access != "" {
		access, err = uplink.ParseAccess(f.opts.Access)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
		access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		serializedAccess, err := access.Serialize()
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil {
		return nil, errors.New("access not found")
	}

	f.access = access

	f.features = (&fs.Features{
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)

	project, err := f.connect(ctx)
	if err != nil {
		return nil, err
	}
	f.project = project

	// Root validation needs to check the following: If a bucket path is
	// specified and exists, then the object must be a directory.
	//
	// NOTE: At this point this must return the filesystem object we've
	// created so far even if there is an error.
	if root != "" {
		bucketName, bucketPath := bucket.Split(root)

		if bucketName != "" && bucketPath != "" {
			_, err = project.StatBucket(ctx, bucketName)
			if err != nil {
				return f, errors.Wrap(err, "tardigrade: bucket")
			}

			object, err := project.StatObject(ctx, bucketName, bucketPath)
			if err == nil {
				if !object.IsPrefix {
					// If the root is actually a file we
					// need to return the *parent*
					// directory of the root instead and an
					// error that the original root
					// requested is a file.
					newRoot := path.Dir(f.root)
					if newRoot == "." {
						newRoot = ""
					}
					f.root = newRoot

					return f, fs.ErrorIsFile
				}
			}
		}
	}

	return f, nil
}

// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
	fs.Debugf(f, "connecting...")
	defer fs.Debugf(f, "connected: %+v", err)

	cfg := uplink.Config{}

	project, err = cfg.OpenProject(ctx, f.access)
	if err != nil {
		return nil, errors.Wrap(err, "tardigrade: project")
	}

	return
}

// absolute computes the absolute bucket name and path from the filesystem root
// and the relative path provided.
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
	bn, bp := bucket.Split(path.Join(f.root, relative))

	// NOTE: Technically libuplink does not care about the encoding. It is
	// happy to work with them as opaque byte sequences. However, rclone
	// has a test that requires two paths with the same normalized form
	// (but different un-normalized forms) to point to the same file. This
	// means we have to normalize before we interact with libuplink.
	return norm.NFC.String(bn), norm.NFC.String(bp)
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("FS sj://%s", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet()
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "ls ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		if bucketPath != "" {
			return nil, fs.ErrorListBucketRequired
		}

		return f.listBuckets(ctx)
	}

	return f.listObjects(ctx, relative, bucketName, bucketPath)
}

func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "BKT ls")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
	}

	return entries, buckets.Err()
}

// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
	if object.IsPrefix {
		// . The entry must include the relative path as its prefix. Depending on
		// | what is being listed and how the filesystem root was initialized the
		// | relative path may be empty (and so we use path joining here to ensure
		// | we don't end up with an empty path segment).
		// |
		// | . Remove the prefix used during listing.
		// | |
		// | | . Remove the trailing slash.
		// | | |
		// v v v
		return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
	}

	return newObjectFromUplink(f, relative, object)
}

func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)

	opts := &uplink.ListObjectsOptions{
		Prefix: newPrefix(bucketPath),

		System: true,
		Custom: true,
	}
	fs.Debugf(f, "opts %+v", opts)

	objects := f.project.ListObjects(ctx, bucketName, opts)

	for objects.Next() {
		entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
	}

	err = objects.Err()
	if err != nil {
		return nil, err
	}

	return entries, nil
}

// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "ls -R ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		if bucketPath != "" {
			return fs.ErrorListBucketRequired
		}

		return f.listBucketsR(ctx, callback)
	}

	return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}

func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "BKT ls -R")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
		if err != nil {
			return err
		}
	}

	return buckets.Err()
}

func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)

	opts := &uplink.ListObjectsOptions{
		Prefix:    newPrefix(bucketPath),
		Recursive: true,

		System: true,
		Custom: true,
	}

	objects := f.project.ListObjects(ctx, bucketName, opts)

	for objects.Next() {
		object := objects.Item()

		err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
		if err != nil {
			return err
		}
	}

	err = objects.Err()
	if err != nil {
		return err
	}

	return nil
}

// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
	fs.Debugf(f, "stat ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	object, err := f.project.StatObject(ctx, bucketName, bucketPath)
	if err != nil {
		fs.Debugf(f, "err: %+v", err)

		if errors.Is(err, uplink.ErrObjectNotFound) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	return newObjectFromUplink(f, relative, object), nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())

	// Reject options we don't support.
	for _, option := range options {
		if option.Mandatory() {
			fs.Errorf(f, "Unsupported mandatory option: %v", option)

			return nil, errors.New("unsupported mandatory option")
		}
	}

	bucketName, bucketPath := f.absolute(src.Remote())

	upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			aerr := upload.Abort()
			if aerr != nil {
				fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
			}
		}
	}()

	err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
		"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
	})
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(upload, in)
	if err != nil {
		err = fserrors.RetryError(err)
		fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

		return nil, err
	}

	err = upload.Commit()
	if err != nil {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			// Rclone assumes the backend will create the bucket if it does not exist yet.
			// Here we create the bucket and return a retry error for rclone to retry the upload.
			_, err = f.project.EnsureBucket(ctx, bucketName)
			if err != nil {
				return nil, err
			}
			err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
		}
		return nil, err
	}

	return newObjectFromUplink(f, "", upload.Info()), nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "mkdir -p ./%s", relative)

	bucketName, _ := f.absolute(relative)

	_, err = f.project.EnsureBucket(ctx, bucketName)

	return err
}

// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "rmdir ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	if bucketPath != "" {
		// If we can successfully stat it, then it is an object (and not a prefix).
		_, err := f.project.StatObject(ctx, bucketName, bucketPath)
		if err != nil {
			if errors.Is(err, uplink.ErrObjectNotFound) {
				// At this point we know it is not an object,
				// but we don't know if it is a prefix for one.
				//
				// We check this by doing a listing and if we
				// get any results back, then we know this is a
				// valid prefix (which implies the directory is
				// not empty).
				opts := &uplink.ListObjectsOptions{
					Prefix: newPrefix(bucketPath),

					System: true,
					Custom: true,
				}

				objects := f.project.ListObjects(ctx, bucketName, opts)

				if objects.Next() {
					return fs.ErrorDirectoryNotEmpty
				}

				return objects.Err()
			}

			return err
		}

		return fs.ErrorIsFile
	}

	_, err = f.project.DeleteBucket(ctx, bucketName)
	if err != nil {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			return fs.ErrorDirNotFound
		}

		if errors.Is(err, uplink.ErrBucketNotEmpty) {
			return fs.ErrorDirectoryNotEmpty
		}

		return err
	}

	return nil
}

// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially constructed, path normalization may have removed it
// (e.g. a trailing slash from the CLI is removed before it ever gets to the
// backend code).
func newPrefix(prefix string) string {
	if prefix == "" {
		return prefix
	}

	if prefix[len(prefix)-1] == '/' {
		return prefix
	}

	return prefix + "/"
}
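newPrefix is small but load-bearing: libuplink treats a listing prefix as a directory only when it ends in a slash, while rclone's path handling strips trailing slashes. A standalone sketch of the rule (a local copy of the function for illustration):

package main

import "fmt"

// Local copy of newPrefix above: listings need a trailing slash,
// except for the empty (bucket root) prefix.
func newPrefix(prefix string) string {
	if prefix == "" || prefix[len(prefix)-1] == '/' {
		return prefix
	}
	return prefix + "/"
}

func main() {
	for _, p := range []string{"", "dir", "dir/"} {
		fmt.Printf("%q becomes %q\n", p, newPrefix(p))
	}
	// "" becomes ""
	// "dir" becomes "dir/"
	// "dir/" becomes "dir/"
}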
204  backend/tardigrade/object.go  Normal file
@@ -0,0 +1,204 @@
// +build go1.13,!plan9

package tardigrade

import (
	"context"
	"io"
	"path"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/bucket"
	"golang.org/x/text/unicode/norm"

	"storj.io/uplink"
)

// Object describes a Tardigrade object
type Object struct {
	fs *Fs

	absolute string

	size     int64
	created  time.Time
	modified time.Time
}

// Check the interfaces are satisfied.
var _ fs.Object = &Object{}

// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
	// Attempt to use the modified time from the metadata. Otherwise
	// fall back to the server time.
	modified := object.System.Created

	if modifiedStr, ok := object.Custom["rclone:mtime"]; ok {
		var err error

		modified, err = time.Parse(time.RFC3339Nano, modifiedStr)
		if err != nil {
			modified = object.System.Created
		}
	}

	bucketName, _ := bucket.Split(path.Join(f.root, relative))

	return &Object{
		fs: f,

		absolute: norm.NFC.String(bucketName + "/" + object.Key),

		size:     object.System.ContentLength,
		created:  object.System.Created,
		modified: modified,
	}
}

// String returns a description of the Object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}

	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	// It is possible that we have an empty root (meaning the filesystem is
	// rooted at the project level). In this case the relative path is just
	// the full absolute path to the object (including the bucket name).
	if o.fs.root == "" {
		return o.absolute
	}

	// At this point we know that the filesystem itself is at least a
	// bucket name (and possibly a prefix path).
	//
	// . This is necessary to remove the slash.
	// |
	// v
	return o.absolute[len(o.fs.root)+1:]
}

// ModTime returns the modification date of the file.
// It should return a best guess if one isn't available.
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modified
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file.
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) {
	fs.Debugf(o, "%s", ty)

	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
	fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute)

	return fs.ErrorCantSetModTime
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) {
	fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options)

	bucketName, bucketPath := bucket.Split(o.absolute)

	// Convert the semantics of HTTP range headers to an offset and length
	// that libuplink can use.
	var (
		offset int64 = 0
		length int64 = -1
	)

	for _, option := range options {
		switch opt := option.(type) {
		case *fs.RangeOption:
			s := opt.Start >= 0
			e := opt.End >= 0

			switch {
			case s && e:
				offset = opt.Start
				length = (opt.End + 1) - opt.Start
			case s && !e:
				offset = opt.Start
			case !s && e:
				object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
				if err != nil {
					return nil, err
				}

				offset = object.System.ContentLength - opt.End
				length = opt.End
			}
		case *fs.SeekOption:
			offset = opt.Offset
		default:
			if option.Mandatory() {
				fs.Errorf(o, "Unsupported mandatory option: %v", option)

				return nil, errors.New("unsupported mandatory option")
			}
		}
	}

	fs.Debugf(o, "range %d + %d", offset, length)

	return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{
		Offset: offset,
		Length: length,
	})
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)

	oNew, err := o.fs.Put(ctx, in, src, options...)

	if err == nil {
		*o = *(oNew.(*Object))
	}

	return err
}

// Remove this object.
func (o *Object) Remove(ctx context.Context) (err error) {
	fs.Debugf(o, "rm sj://%s", o.absolute)

	bucketName, bucketPath := bucket.Split(o.absolute)

	_, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath)

	return err
}
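The option handling in Open converts rclone's HTTP-style range semantics into the offset and length form libuplink wants; the suffix-range case is the only one that needs a stat round trip, to learn the content length. A minimal sketch of that conversion (rangeToOffsetLength is a hypothetical stand-in, with -1 meaning unset, and contentLength standing in for a StatObject call):

package main

import "fmt"

// rangeToOffsetLength translates an HTTP-style (start, end) pair, where -1
// means "unset", into the (offset, length) form libuplink expects.
func rangeToOffsetLength(start, end, contentLength int64) (offset, length int64) {
	offset, length = 0, -1
	switch {
	case start >= 0 && end >= 0:
		offset, length = start, end+1-start
	case start >= 0:
		offset = start
	case end >= 0:
		// a suffix range like "bytes=-100": the last `end` bytes
		offset, length = contentLength-end, end
	}
	return offset, length
}

func main() {
	fmt.Println(rangeToOffsetLength(0, 99, 1000))   // 0 100
	fmt.Println(rangeToOffsetLength(500, -1, 1000)) // 500 -1
	fmt.Println(rangeToOffsetLength(-1, 100, 1000)) // 900 100
}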
19  backend/tardigrade/tardigrade_test.go  Normal file
@@ -0,0 +1,19 @@
// +build go1.13,!plan9

// Test Tardigrade filesystem interface
package tardigrade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/tardigrade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestTardigrade:",
		NilObject:  (*tardigrade.Object)(nil),
	})
}
3  backend/tardigrade/tardigrade_unsupported.go  Normal file
@@ -0,0 +1,3 @@
// +build !go1.13 plan9

package tardigrade
@@ -13,7 +13,7 @@ func init() {
 	registerPolicy("epall", &EpAll{})
 }

-// EpAll stands for existing path, All
+// EpAll stands for existing path, all
 // Action category: apply to all found.
 // Create category: apply to all found.
 // Search category: same as epff.
@@ -11,7 +11,7 @@ func init() {
 	registerPolicy("lus", &Lus{})
 }

-// Lus stands for least free space
+// Lus stands for least used space
 // Search category: same as eplus.
 // Action category: same as eplus.
 // Create category: Pick the drive with the least used space.
@@ -18,6 +18,8 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
)

// Register with Fs
@@ -47,7 +49,7 @@ func init() {
 			Default: "ff",
 		}, {
 			Name:     "cache_time",
-			Help:     "Cache time of usage and free space (in seconds)",
+			Help:     "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.",
 			Required: true,
 			Default:  120,
 		}},
@@ -200,19 +202,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantCopy
 	}
 	o := srcObj.UnWrap()
-	u := o.UpstreamFs()
-	do := u.Features().Copy
-	if do == nil {
+	su := o.UpstreamFs()
+	if su.Features().Copy == nil {
 		return nil, fs.ErrorCantCopy
 	}
-	if !u.IsCreatable() {
+	var du *upstream.Fs
+	for _, u := range f.upstreams {
+		if operations.Same(u.RootFs, su.RootFs) {
+			du = u
+		}
+	}
+	if du == nil {
+		return nil, fs.ErrorCantCopy
+	}
+	if !du.IsCreatable() {
 		return nil, fs.ErrorPermissionDenied
 	}
-	co, err := do(ctx, o, remote)
+	co, err := du.Features().Copy(ctx, o, remote)
 	if err != nil || co == nil {
 		return nil, err
 	}
-	wo, err := f.wrapEntries(u.WrapObject(co))
+	wo, err := f.wrapEntries(du.WrapObject(co))
 	return wo.(*Object), err
 }
@@ -243,18 +253,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	objs := make([]*upstream.Object, len(entries))
 	errs := Errors(make([]error, len(entries)))
 	multithread(len(entries), func(i int) {
-		u := entries[i].UpstreamFs()
+		su := entries[i].UpstreamFs()
 		o, ok := entries[i].(*upstream.Object)
 		if !ok {
-			errs[i] = errors.Wrap(fs.ErrorNotAFile, u.Name())
+			errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
 			return
 		}
-		mo, err := u.Features().Move(ctx, o.UnWrap(), remote)
+		var du *upstream.Fs
+		for _, u := range f.upstreams {
+			if operations.Same(u.RootFs, su.RootFs) {
+				du = u
+			}
+		}
+		if du == nil {
+			errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
+			return
+		}
+		mo, err := du.Features().Move(ctx, o.UnWrap(), remote)
 		if err != nil || mo == nil {
-			errs[i] = errors.Wrap(err, u.Name())
+			errs[i] = errors.Wrap(err, su.Name())
 			return
 		}
-		objs[i] = u.WrapObject(mo)
+		objs[i] = du.WrapObject(mo)
 	})
 	var en []upstream.Entry
 	for _, o := range objs {
@@ -297,7 +317,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		su := upstreams[i]
 		var du *upstream.Fs
 		for _, u := range f.upstreams {
-			if u.RootFs.Root() == su.RootFs.Root() {
+			if operations.Same(u.RootFs, su.RootFs) {
 				du = u
 			}
 		}
@@ -561,6 +581,69 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	return f.mergeDirEntries(entriess)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	var entriess [][]upstream.Entry
	errs := Errors(make([]error, len(f.upstreams)))
	var mutex sync.Mutex
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		var err error
		callback := func(entries fs.DirEntries) error {
			uEntries := make([]upstream.Entry, len(entries))
			for j, e := range entries {
				uEntries[j], _ = u.WrapEntry(e)
			}
			mutex.Lock()
			entriess = append(entriess, uEntries)
			mutex.Unlock()
			return nil
		}
		do := u.Features().ListR
		if do != nil {
			err = do(ctx, dir, callback)
		} else {
			err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
		}
		if err != nil {
			errs[i] = errors.Wrap(err, u.Name())
			return
		}
	})
	if len(errs) == len(errs.FilterNil()) {
		errs = errs.Map(func(e error) error {
			if errors.Cause(e) == fs.ErrorDirNotFound {
				return nil
			}
			return e
		})
		if len(errs) == 0 {
			return fs.ErrorDirNotFound
		}
		return errs.Err()
	}
	entries, err := f.mergeDirEntries(entriess)
	if err != nil {
		return err
	}
	return callback(entries)
}

// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	objs := make([]*upstream.Object, len(f.upstreams))
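ListR above fans out one listing per upstream and merges the results; the per-upstream callbacks append to a shared slice, so the append is guarded by a mutex. A minimal sketch of that fan-out-and-collect shape (multithread here is a hypothetical stand-in for the union backend's helper):

package main

import (
	"fmt"
	"sync"
)

// multithread runs fn once per index in its own goroutine and waits for all
// of them, mirroring how the union backend runs one worker per upstream.
func multithread(n int, fn func(i int)) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fn(i)
		}(i)
	}
	wg.Wait()
}

func main() {
	var (
		mu      sync.Mutex
		results []string
	)
	multithread(3, func(i int) {
		// the shared slice is only touched under the mutex
		mu.Lock()
		defer mu.Unlock()
		results = append(results, fmt.Sprintf("upstream-%d", i))
	})
	fmt.Println(len(results)) // 3
}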
@@ -730,36 +813,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		GetTier:                 true,
 	}).Fill(f)
 	for _, f := range upstreams {
-		if !f.IsWritable() {
-			continue
-		}
-		features = features.Mask(f) // Mask all writable upstream fs
+		features = features.Mask(f) // Mask all upstream fs
 	}

 	// Really need the union of all upstreams for these, so
 	// re-instate and calculate separately.
 	features.ChangeNotify = f.ChangeNotify
 	features.DirCacheFlush = f.DirCacheFlush

-	// FIXME maybe should be masking the bools here?
-
-	// Clear ChangeNotify and DirCacheFlush if all are nil
-	clearChangeNotify := true
-	clearDirCacheFlush := true
-	for _, u := range f.upstreams {
-		uFeatures := u.Features()
-		if uFeatures.ChangeNotify != nil {
-			clearChangeNotify = false
-		}
-		if uFeatures.DirCacheFlush != nil {
-			clearDirCacheFlush = false
-		}
-	}
-	if clearChangeNotify {
-		features.ChangeNotify = nil
-	}
-	if clearDirCacheFlush {
-		features.DirCacheFlush = nil
-	}
+	// Enable ListR when upstreams either support ListR or are local,
+	// but not when all upstreams are local
+	if features.ListR == nil {
+		for _, u := range upstreams {
+			if u.Features().ListR != nil {
+				features.ListR = f.ListR
+			} else if !u.Features().IsLocal {
+				features.ListR = nil
+				break
+			}
+		}
+	}

 	f.features = features
@@ -806,4 +873,5 @@ var (
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
)
@@ -1135,6 +1135,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		NoResponse:    true,
		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
		ContentType:   fs.MimeType(ctx, src),
		Options:       options,
	}
	if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
		opts.ExtraHeaders = map[string]string{}
@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return resp.Body, err
 }

-func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
 	// prepare upload
 	var resp *http.Response
 	var ur api.AsyncInfo
@@ -1073,6 +1073,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
|
||||
Method: "GET",
|
||||
Path: "/resources/upload",
|
||||
Parameters: url.Values{},
|
||||
Options: options,
|
||||
}
|
||||
|
||||
opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
|
||||
@@ -1121,7 +1122,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
//upload file
|
||||
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
|
||||
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bin/.ignore-emails (Normal file, 3 lines)
@@ -0,0 +1,3 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>
bin/check-merged.go (Normal file, 136 lines)
@@ -0,0 +1,136 @@
// +build ignore

// Attempt to work out if branches have already been merged
package main

import (
    "bufio"
    "errors"
    "flag"
    "fmt"
    "log"
    "os"
    "os/exec"
    "regexp"
)

var (
    // Flags
    master = flag.String("master", "master", "Branch to work out if merged into")
    version = "development version" // overridden by goreleaser
)

func usage() {
    fmt.Fprintf(os.Stderr, `Usage: %s [options]
Version: %s

Attempt to work out if branches in the current git repo have been
merged into master.

Example usage:

    %s

Full options:
`, os.Args[0], version, os.Args[0])
    flag.PrintDefaults()
}

var (
    printedSep = false
)

const (
    sep1 = "============================================================"
    sep2 = "------------------------------------------------------------"
)

// Show the diff between two git revisions
func gitDiffDiff(rev1, rev2 string) {
    fmt.Printf("Diff of diffs of %q and %q\n", rev1, rev2)
    cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(git show "%s") <(git show "%s")`, rev1, rev2))
    out, err := cmd.Output()
    if err != nil {
        var exitErr *exec.ExitError
        if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
            // OK just different
        } else {
            log.Fatalf("git diff diff failed: %#v", err)
        }
    }
    _, _ = os.Stdout.Write(out)
}

var reCommit = regexp.MustCompile(`commit ([0-9a-f]{32,})`)

// Grep the git log for logLine
func gitLogGrep(branch, rev, logLine string) {
    cmd := exec.Command("git", "log", "--grep", regexp.QuoteMeta(logLine), *master)
    out, err := cmd.Output()
    if err != nil {
        log.Fatalf("git log grep failed: %v", err)
    }
    if len(out) > 0 {
        if !printedSep {
            fmt.Println(sep1)
            printedSep = true
        }
        fmt.Printf("Branch: %s - MAY BE MERGED to %s\nLog: %s\n\n", branch, *master, logLine)
        _, _ = os.Stdout.Write(out)
        match := reCommit.FindSubmatch(out)
        if len(match) != 0 {
            commit := string(match[1])
            fmt.Println(sep2)
            gitDiffDiff(rev, commit)
        }
        fmt.Println(sep1)
    }
}

// * b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url
var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)

// Run git branch -v, parse the output and check it in the log
func gitBranch() {
    cmd := exec.Command("git", "branch", "-v")
    cmd.Stderr = os.Stderr
    out, err := cmd.StdoutPipe()
    if err != nil {
        log.Fatalf("git branch pipe failed: %v", err)
    }
    if err := cmd.Start(); err != nil {
        log.Fatalf("git branch failed: %v", err)
    }
    scanner := bufio.NewScanner(out)
    for scanner.Scan() {
        line := scanner.Text()
        match := reLine.FindStringSubmatch(line)
        if len(match) != 4 {
            log.Printf("Invalid line %q", line)
            continue
        }
        branch, rev, logLine := match[1], match[2], match[3]
        if branch == *master {
            continue
        }
        //fmt.Printf("branch = %q, rev = %q, logLine = %q\n", branch, rev, logLine)
        gitLogGrep(branch, rev, logLine)
    }
    if err := scanner.Err(); err != nil {
        log.Fatalf("failed reading git branch: %v", err)
    }
    if err := cmd.Wait(); err != nil {
        log.Fatalf("git branch wait failed: %v", err)
    }
}

func main() {
    flag.Usage = usage
    flag.Parse()
    args := flag.Args()
    if len(args) != 0 {
        usage()
        log.Fatal("Wrong number of arguments")
    }
    gitBranch()
}
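The commented sample line above `reLine` is worth checking against the regexp itself. A minimal, self-contained sketch (not part of the diff) showing what the three capture groups extract from that sample `git branch -v` line:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as reLine in bin/check-merged.go above.
var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)

func main() {
	line := "* b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url"
	if m := reLine.FindStringSubmatch(line); len(m) == 4 {
		// The optional "[gone]" tracking info is matched but discarded.
		fmt.Printf("branch=%q rev=%q log=%q\n", m[1], m[2], m[3])
	}
}
```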

@@ -182,6 +182,7 @@ func compileArch(version, goos, goarch, dir string) bool {
	args := []string{
		"go", "build",
		"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
		"-trimpath",
		"-i",
		"-o", output,
		"-tags", *tags,
@@ -4,6 +4,7 @@ Make backend documentation
"""

import os
import io
import subprocess

marker = "<!--- autogenerated options"
@@ -19,6 +20,11 @@ def output_docs(backend, out):
    out.flush()
    subprocess.check_call(["rclone", "help", "backend", backend], stdout=out)

def output_backend_tool_docs(backend, out):
    """Output documentation for backend tool to out"""
    out.flush()
    subprocess.call(["rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)

def alter_doc(backend):
    """Alter the documentation for backend"""
    doc_file = "docs/content/"+backend+".md"
@@ -35,6 +41,7 @@ def alter_doc(backend):
                start_full = start + " - DO NOT EDIT, instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs -->\n" % (backend, backend)
                out_file.write(start_full)
                output_docs(backend, out_file)
                output_backend_tool_docs(backend, out_file)
                out_file.write(stop+" -->\n")
                altered = True
            if not in_docs:

@@ -54,8 +54,10 @@ docs = [
    "pcloud.md",
    "premiumizeme.md",
    "putio.md",
    "seafile.md",
    "sftp.md",
    "sugarsync.md",
    "tardigrade.md",
    "union.md",
    "webdav.md",
    "yandex.md",
@@ -7,17 +7,15 @@ import re
import subprocess

AUTHORS = "docs/content/authors.md"
IGNORE = [ "nick@raig-wood.com" ]
IGNORE = "bin/.ignore-emails"

def load():
def load(filename):
    """
    returns a set of emails already in authors.md
    returns a set of emails already in the file
    """
    with open(AUTHORS) as fd:
    with open(filename) as fd:
        authors = fd.read()
    emails = set(re.findall(r"<(.*?)>", authors))
    emails.update(IGNORE)
    return emails
    return set(re.findall(r"<(.*?)>", authors))

def add_email(name, email):
    """
@@ -32,7 +30,9 @@ def main():
    out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
    out = out.decode("utf-8")

    previous = load()
    ignored = load(IGNORE)
    previous = load(AUTHORS)
    previous.update(ignored)
    for line in out.split("\n"):
        line = line.strip()
        if line == "":

@@ -6,6 +6,7 @@ import (
	_ "github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/about"
	_ "github.com/rclone/rclone/cmd/authorize"
	_ "github.com/rclone/rclone/cmd/backend"
	_ "github.com/rclone/rclone/cmd/cachestats"
	_ "github.com/rclone/rclone/cmd/cat"
	_ "github.com/rclone/rclone/cmd/check"
cmd/backend/backend.go (Normal file, 179 lines)
@@ -0,0 +1,179 @@
package backend

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "sort"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/cmd/rc"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/operations"
    "github.com/spf13/cobra"
)

var (
    options []string
    useJSON bool
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
    flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.")
}

var commandDefinition = &cobra.Command{
    Use:   "backend <command> remote:path [opts] <args>",
    Short: `Run a backend specific command.`,
    Long: `
This runs a backend specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.

You can discover what commands a backend implements by using

    rclone backend help remote:
    rclone backend help <backendname>

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).

    rclone backend features remote:

Pass options to the backend command with -o. This should be key=value or key, eg:

    rclone backend stats remote:path stats -o format=json -o long

Pass arguments to the backend by placing them on the end of the line

    rclone backend cleanup remote:path file1 file2 file3

Note to run these commands on a running backend then see
[backend/command](/rc/#backend/command) in the rc docs.
`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(2, 1E6, command, args)
        name, remote := args[0], args[1]
        cmd.Run(false, false, command, func() error {
            // show help if remote is a backend name
            if name == "help" {
                fsInfo, err := fs.Find(remote)
                if err == nil {
                    return showHelp(fsInfo)
                }
            }
            // Create remote
            fsInfo, configName, fsPath, config, err := fs.ConfigFs(remote)
            if err != nil {
                return err
            }
            f, err := fsInfo.NewFs(configName, fsPath, config)
            if err != nil {
                return err
            }
            // Run the command
            var out interface{}
            switch name {
            case "help":
                return showHelp(fsInfo)
            case "features":
                out = operations.GetFsInfo(f)
            default:
                doCommand := f.Features().Command
                if doCommand == nil {
                    return errors.Errorf("%v: doesn't support backend commands", f)
                }
                arg := args[2:]
                opt := rc.ParseOptions(options)
                out, err = doCommand(context.Background(), name, arg, opt)
            }
            if err != nil {
                return errors.Wrapf(err, "command %q failed", name)
            }
            // Output the result
            writeJSON := false
            if useJSON {
                writeJSON = true
            } else {
                switch x := out.(type) {
                case nil:
                case string:
                    fmt.Println(out)
                case []string:
                    for _, line := range x {
                        fmt.Println(line)
                    }
                default:
                    writeJSON = true
                }
            }
            if writeJSON {
                // Write indented JSON to the output
                enc := json.NewEncoder(os.Stdout)
                enc.SetIndent("", "\t")
                err = enc.Encode(out)
                if err != nil {
                    return errors.Wrap(err, "failed to write JSON")
                }
            }
            return nil
        })
        return nil
    },
}

// show help for a backend
func showHelp(fsInfo *fs.RegInfo) error {
    cmds := fsInfo.CommandHelp
    name := fsInfo.Name
    if len(cmds) == 0 {
        return errors.Errorf("%s backend has no commands", name)
    }
    fmt.Printf("### Backend commands\n\n")
    fmt.Printf(`Here are the commands specific to the %s backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).

`, name)
    for _, cmd := range cmds {
        fmt.Printf("#### %s\n\n", cmd.Name)
        fmt.Printf("%s\n\n", cmd.Short)
        fmt.Printf("    rclone backend %s remote: [options] [<arguments>+]\n\n", cmd.Name)
        if cmd.Long != "" {
            fmt.Printf("%s\n\n", cmd.Long)
        }
        if len(cmd.Opts) != 0 {
            fmt.Printf("Options:\n\n")

            ks := []string{}
            for k := range cmd.Opts {
                ks = append(ks, k)
            }
            sort.Strings(ks)
            for _, k := range ks {
                v := cmd.Opts[k]
                fmt.Printf("- %q: %s\n", k, v)
            }
            fmt.Printf("\n")
        }
    }
    return nil
}
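For context, here is a hypothetical sketch (not from this diff) of what a backend has to supply for the command lookup above to succeed: `CommandHelp` entries in its `fs.RegInfo` and a `Features().Command` implementation. The `noop` command and its `echo` option are invented for illustration; the types and the `fs.ErrorCommandNotFound` sentinel follow the usage visible in this file but should be treated as assumptions:

```go
package demo

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Fs is a stand-in for a backend's Fs type.
type Fs struct{}

// commandHelp would be wired into the backend's fs.RegInfo so that
// "rclone backend help <backendname>" can print it via showHelp.
var commandHelp = []fs.CommandHelp{{
	Name:  "noop",
	Short: "A do nothing command for testing",
	Opts:  map[string]string{"echo": "echo the arguments back"},
}}

// Command implements the Features().Command hook that
// "rclone backend <command>" dispatches to above.
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	switch name {
	case "noop":
		if _, ok := opt["echo"]; ok {
			return arg, nil // a []string is printed one item per line by the caller
		}
		return nil, nil
	}
	return nil, fs.ErrorCommandNotFound
}
```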

@@ -9,6 +9,7 @@ import (
	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/cache"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/spf13/cobra"
)

@@ -22,8 +23,10 @@ var commandDefinition = &cobra.Command{
	Long: `
Print cache stats for a remote in JSON format
`,
	Hidden: true,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])

		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {

@@ -66,6 +66,7 @@ const (
	exitCodeNoRetryError
	exitCodeFatalError
	exitCodeTransferExceeded
	exitCodeNoFilesTransferred
)

// ShowVersion prints the version to stdout
@@ -312,6 +313,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
		}
	}
	resolveExitCode(cmdErr)

}

// CheckArgs checks there are enough arguments and prints a message if not
@@ -430,6 +432,11 @@ func initConfig() {
func resolveExitCode(err error) {
	atexit.Run()
	if err == nil {
		if fs.Config.ErrorOnNoTransfer {
			if accounting.GlobalStats().GetTransfers() == 0 {
				os.Exit(exitCodeNoFilesTransferred)
			}
		}
		os.Exit(exitCodeSuccess)
	}

@@ -251,7 +251,14 @@ func (fsys *FS) Readdir(dirPath string,
			fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
			continue
		}
		fill(name, nil, 0)
		if usingReaddirPlus {
			// We have called host.SetCapReaddirPlus() so supply the stat information
			var stat fuse.Stat_t
			_ = fsys.stat(node, &stat) // not capable of returning an error
			fill(name, &stat, 0)
		} else {
			fill(name, nil, 0)
		}
	}
}
itemsRead = len(items)
@@ -268,25 +275,15 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
	defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
	const blockSize = 4096
	const fsBlocks = (1 << 50) / blockSize
	stat.Blocks = fsBlocks // Total data blocks in file system.
	stat.Bfree = fsBlocks // Free blocks in file system.
	stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
	stat.Files = 1e9 // Total files in file system.
	stat.Ffree = 1e9 // Free files in file system.
	stat.Bsize = blockSize // Block size
	stat.Namemax = 255 // Maximum file name length?
	stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	total, used, free := fsys.VFS.Statfs()
	if total >= 0 {
		stat.Blocks = uint64(total) / blockSize
	}
	if used >= 0 {
		stat.Bfree = stat.Blocks - uint64(used)/blockSize
	}
	if free >= 0 {
		stat.Bavail = uint64(free) / blockSize
	}
	total, _, free := fsys.VFS.Statfs()
	stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
	stat.Bfree = uint64(free) / blockSize // Free blocks in file system.
	stat.Bavail = stat.Bfree // Free blocks in file system if you're not root.
	stat.Files = 1e9 // Total files in file system.
	stat.Ffree = 1e9 // Free files in file system.
	stat.Bsize = blockSize // Block size
	stat.Namemax = 255 // Maximum file name length?
	stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	mountlib.ClipBlocks(&stat.Blocks)
	mountlib.ClipBlocks(&stat.Bfree)
	mountlib.ClipBlocks(&stat.Bavail)
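Both Statfs rewrites in this diff do the same unit conversion: the VFS reports sizes in bytes, while FUSE wants counts of 4096-byte blocks. A minimal sketch of that conversion, combining the new code's division with the removed code's guard against unknown (negative) sizes and its 1 PiB fallback:

```go
package main

import "fmt"

// toBlocks converts a byte count from VFS.Statfs into 4096-byte
// blocks; a negative count means "unknown" and falls back to the
// huge fixed size the removed code advertised.
func toBlocks(bytes int64) uint64 {
	const blockSize = 4096
	if bytes < 0 {
		return (1 << 50) / blockSize // pretend the fs is 1 PiB
	}
	return uint64(bytes) / blockSize
}

func main() {
	fmt.Println(toBlocks(-1))       // unknown size -> 274877906944 blocks
	fmt.Println(toBlocks(10 << 30)) // 10 GiB -> 2621440 blocks
}
```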
@@ -371,12 +368,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
	if errc != 0 {
		return errc
	}
	var err error
	if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = handle.WriteAt(buff, ofst)
	} else {
		n, err = handle.Write(buff)
	}
	n, err := handle.WriteAt(buff, ofst)
	if err != nil {
		return translateError(err)
	}
@@ -553,11 +545,11 @@ func translateError(err error) (errc int) {
	switch errors.Cause(err) {
	case vfs.OK:
		return 0
	case vfs.ENOENT:
	case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
		return -fuse.ENOENT
	case vfs.EEXIST:
	case vfs.EEXIST, fs.ErrorDirExists:
		return -fuse.EEXIST
	case vfs.EPERM:
	case vfs.EPERM, fs.ErrorPermissionDenied:
		return -fuse.EPERM
	case vfs.ECLOSED:
		return -fuse.EBADF
@@ -569,7 +561,7 @@ func translateError(err error) (errc int) {
		return -fuse.EBADF
	case vfs.EROFS:
		return -fuse.EROFS
	case vfs.ENOSYS:
	case vfs.ENOSYS, fs.ErrorNotImplemented:
		return -fuse.ENOSYS
	case vfs.EINVAL:
		return -fuse.EINVAL
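All three translateError variants touched in this diff rely on the same unwrapping step, shown here in isolation: errors.Cause strips github.com/pkg/errors annotation so a sentinel such as fs.ErrorDirNotFound still matches a switch case after being wrapped. A self-contained sketch with a stand-in sentinel:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found")

func main() {
	// Wrapping adds context but hides the sentinel from ==.
	err := errors.Wrap(errNotFound, "reading directory")
	switch errors.Cause(err) {
	case errNotFound:
		fmt.Println("mapped to ENOENT") // this branch is taken
	default:
		fmt.Println("unmapped:", err)
	}
}
```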

@@ -26,6 +26,13 @@ import (
	"github.com/rclone/rclone/vfs/vfsflags"
)

const (
	// SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
	// capability [Windows only]. A file system that has the readdir-plus capability can send
	// full stat information during Readdir, thus avoiding extraneous Getattr calls.
	usingReaddirPlus = runtime.GOOS == "windows"
)

func init() {
	name := "cmount"
	if runtime.GOOS == "windows" {
@@ -142,6 +149,10 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
	// Create underlying FS
	fsys := NewFS(f)
	host := fuse.NewFileSystemHost(fsys)
	if usingReaddirPlus {
		host.SetCapReaddirPlus(true)
	}
	host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

	// Create options
	options := mountOptions(f.Name()+":"+f.Root(), mountpoint)

@@ -11,9 +11,9 @@ package cmount
import (
	"testing"

	"github.com/rclone/rclone/cmd/mountlib/mounttest"
	"github.com/rclone/rclone/vfs/vfstest"
)

func TestMount(t *testing.T) {
	mounttest.RunTests(t, mount)
	vfstest.RunTests(t, false, mount)
}

@@ -66,8 +66,8 @@ option when copying a small number of files into a large destination
can speed transfers up greatly.

For example, if you have many files in /path/to/src but only a few of
them change every day, you can to copy all the files which have
changed recently very efficiently like this:
them change every day, you can copy all the files which have changed
recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

@@ -15,12 +15,14 @@ import (
var (
	autoFilename = false
	stdout = false
	noClobber = false
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
	flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
	flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}

@@ -35,6 +37,9 @@ Setting --auto-filename will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path.

Setting --no-clobber will prevent overwriting file on the
destination if there is one with the same name.

Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
@@ -59,7 +64,7 @@ output to be written to standard output.
		if stdout {
			err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
		} else {
			_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename)
			_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
		}
		return err
	})
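A hedged sketch of driving the updated six-argument operations.CopyURL directly; the remote string, destination file name, and URL are placeholders, and the single-argument fs.NewFs call is assumed from this era of the codebase:

```go
package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	fdst, err := fs.NewFs("remote:path")
	if err != nil {
		log.Fatal(err)
	}
	// Fetch the URL to file.txt; the final "true" is the new noClobber
	// parameter, refusing to overwrite an existing file of the same name.
	_, err = operations.CopyURL(context.Background(), fdst, "file.txt", "https://example.com/file.txt", false, true)
	if err != nil {
		log.Fatal(err)
	}
}
```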

@@ -110,7 +110,6 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
			fs.Errorf(src, err.Error())
			return true, false
		}
		fs.Debugf(src, "OK")
		return false, false
	}

@@ -60,7 +60,7 @@ use it like this
}

// cryptDecode returns the unencrypted file name
func cryptDecode(cipher crypt.Cipher, args []string) error {
func cryptDecode(cipher *crypt.Cipher, args []string) error {
	output := ""

	for _, encryptedFileName := range args {
@@ -78,7 +78,7 @@ func cryptDecode(cipher crypt.Cipher, args []string) error {
}

// cryptEncode returns the encrypted file name
func cryptEncode(cipher crypt.Cipher, args []string) error {
func cryptEncode(cipher *crypt.Cipher, args []string) error {
	output := ""

	for _, fileName := range args {

@@ -6,6 +6,7 @@ import (

	"github.com/rclone/rclone/backend/dropbox"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)
@@ -23,9 +24,11 @@ hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
	Hidden: true,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
		cmd.Run(false, false, command, func() error {
			return operations.HashLister(context.Background(), dropbox.DbHashType, fsrc, os.Stdout)
		})

@@ -4,12 +4,19 @@ import (
	"context"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

var (
	rmdirs = false
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact")
}

var commandDefinition = &cobra.Command{
@@ -23,6 +30,8 @@ filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `

If you supply the --rmdirs flag, it will remove all empty directories along with it.

Eg delete all files bigger than 100MBytes

Check what would be deleted first (use either)
@@ -41,7 +50,14 @@ delete all files bigger than 100MBytes.
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(true, false, command, func() error {
			return operations.Delete(context.Background(), fsrc)
			if err := operations.Delete(context.Background(), fsrc); err != nil {
				return err
			}
			if rmdirs {
				fdst := cmd.NewFsDir(args)
				return operations.Rmdirs(context.Background(), fdst, "", true)
			}
			return nil
		})
	},
}

cmd/genautocomplete/genautocomplete_fish.go (Normal file, 44 lines)
@@ -0,0 +1,44 @@
package genautocomplete

import (
	"log"

	"github.com/rclone/rclone/cmd"
	"github.com/spf13/cobra"
)

func init() {
	completionDefinition.AddCommand(fishCommandDefinition)
}

var fishCommandDefinition = &cobra.Command{
	Use:   "fish [output_file]",
	Short: `Output fish completion script for rclone.`,
	Long: `
Generates a fish autocompletion script for rclone.

This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, eg

    sudo rclone genautocomplete fish

Logout and login again to use the autocompletion scripts, or source
them directly

    . /etc/fish/completions/rclone.fish

If you supply a command line argument the script will be written
there.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		out := "/etc/fish/completions/rclone.fish"
		if len(args) > 0 {
			out = args[0]
		}
		err := cmd.Root.GenFishCompletionFile(out, true)
		if err != nil {
			log.Fatal(err)
		}
	},
}

@@ -33,3 +33,16 @@ func TestCompletionZsh(t *testing.T) {
	assert.NoError(t, err)
	assert.NotEmpty(t, string(bs))
}

func TestCompletionFish(t *testing.T) {
	tempFile, err := ioutil.TempFile("", "completion_fish")
	assert.NoError(t, err)
	defer func() { _ = tempFile.Close() }()
	defer func() { _ = os.Remove(tempFile.Name()) }()

	fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})

	bs, err := ioutil.ReadFile(tempFile.Name())
	assert.NoError(t, err)
	assert.NotEmpty(t, string(bs))
}

@@ -132,13 +132,13 @@ Eg
    "this file contains a comma, in the file name.txt",6

Note that the --absolute parameter is useful for making lists of files
to pass to an rclone copy with the --files-from flag.
to pass to an rclone copy with the --files-from-raw flag.

For example to find all the files modified within one day and copy
those only (without traversing the whole directory structure):

    rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
    rclone copy --files-from new_files /path/to/local remote:path
    rclone copy --files-from-raw new_files /path/to/local remote:path

` + lshelp.Help,
	Run: func(command *cobra.Command, args []string) {
@@ -185,6 +185,7 @@ func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error {
		case 'h':
			list.AddHash(hashType)
			opt.ShowHash = true
			opt.HashTypes = []string{hashType.String()}
		case 'i':
			list.AddID()
		case 'm':

@@ -29,6 +29,7 @@ func init() {
	flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
	flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
	flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
	flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated).")
}

var commandDefinition = &cobra.Command{
@@ -58,17 +59,25 @@ The output is an array of Items, where each Item looks like this
    "Tier" : "hot",
    }

If --hash is not specified the Hashes property won't be emitted.
If --hash is not specified the Hashes property won't be emitted. The
types of hash can be specified with the --hash-type parameter (which
may be repeated). If --hash-type is set then it implies --hash.

If --no-modtime is specified then ModTime will be blank. This can speed things up on remotes where reading the ModTime takes an extra request (eg s3, swift).
If --no-modtime is specified then ModTime will be blank. This can
speed things up on remotes where reading the ModTime takes an extra
request (eg s3, swift).

If --no-mimetype is specified then MimeType will be blank. This can speed things up on remotes where reading the MimeType takes an extra request (eg s3, swift).
If --no-mimetype is specified then MimeType will be blank. This can
speed things up on remotes where reading the MimeType takes an extra
request (eg s3, swift).

If --encrypted is not specified the Encrypted won't be emitted.

If --dirs-only is not specified files in addition to directories are returned
If --dirs-only is not specified files in addition to directories are
returned

If --files-only is not specified directories in addition to the files will be returned.
If --files-only is not specified directories in addition to the files
will be returned.

The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"

@@ -76,13 +76,23 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
		return nil, translateError(err)
	}
	resp.EntryValid = mountlib.AttrTimeout
	// Check the mnode to see if it has a fuse Node cached
	// We must return the same fuse nodes for vfs Nodes
	node, ok := mnode.Sys().(fusefs.Node)
	if ok {
		return node, nil
	}
	switch x := mnode.(type) {
	case *vfs.File:
		return &File{x}, nil
		node = &File{x}
	case *vfs.Dir:
		return &Dir{x}, nil
		node = &Dir{x}
	default:
		panic("bad type")
	}
	panic("bad type")
	// Cache the node for later
	mnode.SetSys(node)
	return node, nil
}
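The pattern in this hunk (and in mount2's newNode further down) is generic: a vfs node exposes an opaque Sys()/SetSys slot, and the FUSE layer stashes its wrapper there so repeated lookups return the identical node. A stripped-down sketch with invented names (Inner and Wrapper stand in for the vfs node and fuse node types):

```go
package demo

// Inner mirrors the Sys/SetSys surface the diff relies on.
type Inner interface {
	Sys() interface{}
	SetSys(interface{})
}

// Wrapper is the FUSE-side object that must stay unique per Inner.
type Wrapper struct{ inner Inner }

func wrap(n Inner) *Wrapper {
	// Return the cached wrapper if one was stored previously.
	if w, ok := n.Sys().(*Wrapper); ok {
		return w
	}
	w := &Wrapper{inner: n}
	n.SetSys(w) // cache it for the next lookup
	return w
}
```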

// Check interface satisfied
@@ -129,7 +139,9 @@ func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.Cr
	if err != nil {
		return nil, nil, translateError(err)
	}
	return &File{file}, &FileHandle{fh}, err
	node = &File{file}
	file.SetSys(node) // cache the FUSE node for later
	return node, &FileHandle{fh}, err
}

var _ fusefs.NodeMkdirer = (*Dir)(nil)
@@ -141,7 +153,9 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.No
	if err != nil {
		return nil, translateError(err)
	}
	return &Dir{dir}, nil
	node = &Dir{dir}
	dir.SetSys(node) // cache the FUSE node for later
	return node, nil
}

var _ fusefs.NodeRemover = (*Dir)(nil)

@@ -54,25 +54,15 @@ var _ fusefs.FSStatfser = (*FS)(nil)
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
	defer log.Trace("", "")("stat=%+v, err=%v", resp, &err)
	const blockSize = 4096
	const fsBlocks = (1 << 50) / blockSize
	resp.Blocks = fsBlocks // Total data blocks in file system.
	resp.Bfree = fsBlocks // Free blocks in file system.
	resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
	resp.Files = 1e9 // Total files in file system.
	resp.Ffree = 1e9 // Free files in file system.
	resp.Bsize = blockSize // Block size
	resp.Namelen = 255 // Maximum file name length?
	resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	total, used, free := f.VFS.Statfs()
	if total >= 0 {
		resp.Blocks = uint64(total) / blockSize
	}
	if used >= 0 {
		resp.Bfree = resp.Blocks - uint64(used)/blockSize
	}
	if free >= 0 {
		resp.Bavail = uint64(free) / blockSize
	}
	total, _, free := f.VFS.Statfs()
	resp.Blocks = uint64(total) / blockSize // Total data blocks in file system.
	resp.Bfree = uint64(free) / blockSize // Free blocks in file system.
	resp.Bavail = resp.Bfree // Free blocks in file system if you're not root.
	resp.Files = 1e9 // Total files in file system.
	resp.Ffree = 1e9 // Free files in file system.
	resp.Bsize = blockSize // Block size
	resp.Namelen = 255 // Maximum file name length?
	resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	mountlib.ClipBlocks(&resp.Blocks)
	mountlib.ClipBlocks(&resp.Bfree)
	mountlib.ClipBlocks(&resp.Bavail)
@@ -87,11 +77,11 @@ func translateError(err error) error {
	switch errors.Cause(err) {
	case vfs.OK:
		return nil
	case vfs.ENOENT:
	case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
		return fuse.ENOENT
	case vfs.EEXIST:
	case vfs.EEXIST, fs.ErrorDirExists:
		return fuse.EEXIST
	case vfs.EPERM:
	case vfs.EPERM, fs.ErrorPermissionDenied:
		return fuse.EPERM
	case vfs.ECLOSED:
		return fuse.Errno(syscall.EBADF)
@@ -103,7 +93,7 @@ func translateError(err error) error {
		return fuse.Errno(syscall.EBADF)
	case vfs.EROFS:
		return fuse.Errno(syscall.EROFS)
	case vfs.ENOSYS:
	case vfs.ENOSYS, fs.ErrorNotImplemented:
		return fuse.ENOSYS
	case vfs.EINVAL:
		return fuse.Errno(syscall.EINVAL)

@@ -5,7 +5,6 @@ package mount
import (
	"context"
	"io"
	"os"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
@@ -42,12 +41,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
	var n int
	if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = fh.Handle.WriteAt(req.Data, req.Offset)
	} else {
		n, err = fh.Handle.Write(req.Data)
	}
	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
	if err != nil {
		return translateError(err)
	}

@@ -6,12 +6,12 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/cmd/mountlib/mounttest"
	"github.com/rclone/rclone/vfs/vfstest"
)

func TestMount(t *testing.T) {
	if runtime.NumCPU() <= 2 {
		t.Skip("FIXME skipping mount tests as they lock up on <= 2 CPUs - See: https://github.com/rclone/rclone/issues/3154")
	}
	mounttest.RunTests(t, mount)
	vfstest.RunTests(t, false, mount)
}

@@ -6,7 +6,6 @@ import (
	"context"
	"fmt"
	"io"
	"os"
	"syscall"

	fusefs "github.com/hanwen/go-fuse/v2/fs"
@@ -74,11 +73,7 @@ func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written
	var n int
	var err error
	defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
	if f.h.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || f.h.Node().Mode()&os.ModeAppend == 0 {
		n, err = f.h.WriteAt(data, off)
	} else {
		n, err = f.h.Write(data)
	}
	n, err = f.h.WriteAt(data, off)
	return uint32(n), translateError(err)
}

@@ -105,11 +105,11 @@ func translateError(err error) syscall.Errno {
	switch errors.Cause(err) {
	case vfs.OK:
		return 0
	case vfs.ENOENT:
	case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
		return syscall.ENOENT
	case vfs.EEXIST:
	case vfs.EEXIST, fs.ErrorDirExists:
		return syscall.EEXIST
	case vfs.EPERM:
	case vfs.EPERM, fs.ErrorPermissionDenied:
		return syscall.EPERM
	case vfs.ECLOSED:
		return syscall.EBADF
@@ -121,7 +121,7 @@ func translateError(err error) syscall.Errno {
		return syscall.EBADF
	case vfs.EROFS:
		return syscall.EROFS
	case vfs.ENOSYS:
	case vfs.ENOSYS, fs.ErrorNotImplemented:
		return syscall.ENOSYS
	case vfs.EINVAL:
		return syscall.EINVAL

@@ -5,9 +5,9 @@ package mount2
import (
	"testing"

	"github.com/rclone/rclone/cmd/mountlib/mounttest"
	"github.com/rclone/rclone/vfs/vfstest"
)

func TestMount(t *testing.T) {
	mounttest.RunTests(t, mount)
	vfstest.RunTests(t, false, mount)
}

@@ -27,11 +27,20 @@ type Node struct {
var _ fusefs.InodeEmbedder = (*Node)(nil)

// newNode creates a new fusefs.Node from a vfs Node
func newNode(fsys *FS, node vfs.Node) *Node {
	return &Node{
		node: node,
func newNode(fsys *FS, vfsNode vfs.Node) (node *Node) {
	// Check the vfsNode to see if it has a fuse Node cached
	// We must return the same fuse nodes for vfs Nodes
	node, ok := vfsNode.Sys().(*Node)
	if ok {
		return node
	}
	node = &Node{
		node: vfsNode,
		fsys: fsys,
	}
	// Cache the node for later
	vfsNode.SetSys(node)
	return node
}

// String used for pretty printing.
@@ -183,10 +192,7 @@ func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (ino
	if errno != 0 {
		return nil, errno
	}
	newNode := &Node{
		node: vfsNode,
		fsys: n.fsys,
	}
	newNode := newNode(n.fsys, vfsNode)

	// FIXME
	// out.SetEntryTimeout(dt time.Duration)

@@ -116,13 +116,16 @@ foreground mode by default, use the --daemon flag to specify background mode mod
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.

Start the mount like this
On Linux/macOS/FreeBSD start the mount like this where ` + "`/path/to/local/mount`" + `
is an **empty** **existing** directory.

    rclone ` + commandName + ` remote:path/to/files /path/to/local/mount

Or on Windows like this where X: is an unused drive letter
Or on Windows like this where ` + "`X:`" + ` is an unused drive letter
or use a path to **non-existent** directory.

    rclone ` + commandName + ` remote:path/to/files X:
    rclone ` + commandName + ` remote:path/to/files C:\path\to\nonexistent\directory

When running in background mode the user will have to stop the mount manually (specified below).

@@ -312,6 +315,8 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
		if err != nil {
			log.Fatalf("Fatal error: %v", err)
		}
	} else if AllowNonEmpty && runtime.GOOS == "windows" {
		fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
	}

	// Work out the volume name, removing special
@@ -348,7 +353,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
	// mount options
	flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
	flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
	flags.BoolVarP(cmdFlags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
	flags.BoolVarP(cmdFlags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
	flags.BoolVarP(cmdFlags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")

cmd/rc/rc.go (50 lines changed)
@@ -27,6 +27,8 @@ var (
	authUser = ""
	authPass = ""
	loopback = false
	options []string
	arguments []string
)

func init() {
@@ -38,6 +40,8 @@ func init() {
	flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control.")
	flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
	flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
	flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array.")
	flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array.")
}

var commandDefinition = &cobra.Command{
@@ -63,6 +67,29 @@ The --json parameter can be used to pass in a JSON blob as an input
instead of key=value arguments. This is the only way of passing in
more complicated values.

The -o/--opt option can be used to set a key "opt" with key, value
options in the form "-o key=value" or "-o key". It can be repeated as
many times as required. This is useful for rc commands which take the
"opt" parameter which by convention is a dictionary of strings.

    -o key=value -o key2

Will place this in the "opt" value

    {"key":"value", "key2":""}

The -a/--arg option can be used to set strings in the "arg" value. It
can be repeated as many times as required. This is useful for rc
commands which take the "arg" parameter which by convention is a list
of strings.

    -a value -a value2

Will place this in the "arg" value

    ["value", "value2"]

Use --loopback to connect to the rclone instance running "rclone rc".
This is very useful for testing commands without having to run an
rclone rc server, eg:
@@ -103,6 +130,23 @@ func parseFlags() {
	}
}

// ParseOptions parses a slice of options in the form key=value or key
// into a map
func ParseOptions(options []string) (opt map[string]string) {
	opt = make(map[string]string, len(options))
	for _, option := range options {
		equals := strings.IndexRune(option, '=')
		key := option
		value := ""
		if equals >= 0 {
			key = option[:equals]
			value = option[equals+1:]
		}
		opt[key] = value
	}
	return opt
}
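A brief usage sketch of ParseOptions as defined above, mirroring the `-o format=json -o long` example from the backend command docs; keys given without `=` get an empty value:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/rc"
)

func main() {
	opt := rc.ParseOptions([]string{"format=json", "long"})
	fmt.Println(opt) // map[format:json long:]
}
```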

// If the user set flagName set the output to its value
func setAlternateFlag(flagName string, output *string) {
	if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed {
@@ -210,6 +254,12 @@ func run(ctx context.Context, args []string) (err error) {
			return errors.Wrap(err, "bad --json input")
		}
	}
	if len(options) > 0 {
		in["opt"] = ParseOptions(options)
	}
	if len(arguments) > 0 {
		in["arg"] = arguments
	}

	// Do the call
	out, callErr := doCall(ctx, path, in)

@@ -6,6 +6,7 @@ import (
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/serve/httplib"
@@ -131,9 +132,20 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
	// Make the entries for display
	directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
	for _, node := range dirEntries {
		directory.AddEntry(node.Path(), node.IsDir())
		if vfsflags.Opt.NoModTime {
			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
		} else {
			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), node.ModTime().UTC())
		}
	}

	sortParm := r.URL.Query().Get("sort")
	orderParm := r.URL.Query().Get("order")
	directory.ProcessQueryParams(sortParm, orderParm)

	// Set the Last-Modified header to the timestamp
	w.Header().Set("Last-Modified", dir.ModTime().UTC().Format(http.TimeFormat))

	directory.Serve(w, r)
}

@@ -171,6 +183,9 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
		w.Header().Set("Content-Type", mimeType)
	}

	// Set the Last-Modified header to the timestamp
	w.Header().Set("Last-Modified", file.ModTime().UTC().Format(http.TimeFormat))

	// If HEAD no need to read the object since we have set the headers
	if r.Method == "HEAD" {
		return

@@ -1,6 +1,7 @@
package http

import (
	"context"
	"flag"
	"io/ioutil"
	"net/http"
@@ -25,11 +26,13 @@ var (

const (
	testBindAddress = "localhost:0"
	testTemplate = "testdata/golden/testindex.html"
)

func startServer(t *testing.T, f fs.Fs) {
	opt := httplib.DefaultOpt
	opt.ListenAddr = testBindAddress
	opt.Template = testTemplate
	httpServer = newServer(f, &opt)
	assert.NoError(t, httpServer.Serve())
	testURL = httpServer.Server.URL()
@@ -50,6 +53,11 @@ func startServer(t *testing.T, f fs.Fs) {

}

var (
	datedObject = "two.txt"
	expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
)

func TestInit(t *testing.T) {
	// Configure the remote
	config.LoadConfig()
@@ -65,6 +73,11 @@ func TestInit(t *testing.T) {
	f, err := fs.NewFs("testdata/files")
	require.NoError(t, err)

	// set date of datedObject to expectedTime
	obj, err := f.NewObject(context.Background(), datedObject)
	require.NoError(t, err)
	require.NoError(t, obj.SetModTime(context.Background(), expectedTime))

	startServer(t, f)
}

@@ -195,6 +208,18 @@ func TestGET(t *testing.T) {
		body, err := ioutil.ReadAll(resp.Body)
		require.NoError(t, err)

		// Check we got a Last-Modified header and that it is a valid date
		if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
			lastModified := resp.Header.Get("Last-Modified")
			assert.NotEqual(t, "", lastModified, test.Golden)
			modTime, err := http.ParseTime(lastModified)
			assert.NoError(t, err, test.Golden)
			// check the actual date on our special file
			if test.URL == datedObject {
				assert.Equal(t, expectedTime, modTime, test.Golden)
			}
		}

		checkGolden(t, test.Golden, body)
	}
}

cmd/serve/http/testdata/golden/index.html (vendored, 2 lines)
@@ -6,8 +6,8 @@
</head>
<body>
<h1>Directory listing of /</h1>
<a href="one%25.txt">one%.txt</a><br />
<a href="three/">three/</a><br />
<a href="one%25.txt">one%.txt</a><br />
<a href="two.txt">two.txt</a><br />
</body>
</html>

cmd/serve/http/testdata/golden/testindex.html (vendored, Normal file, 11 lines)
@@ -0,0 +1,11 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
@@ -27,6 +27,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.")
	flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")

}

@@ -52,6 +52,29 @@ inserts leading and trailing "/" on --baseurl, so --baseurl "rclone",
--baseurl "/rclone" and --baseurl "/rclone/" are all treated
identically.

--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:

| Parameter   | Description |
| :---------- | :---------- |
| .Name       | The full path of a file/directory. |
| .Title      | Directory listing of .Name |
| .Sort       | The current sort used. This is changeable via ?sort= parameter |
|             | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order      | The current ordering used. This is changeable via ?order= parameter |
|             | Order Options: asc,desc (default asc) |
| .Query      | Currently unused. |
| .Breadcrumb | Allows for creating a relative navigation |
|-- .Link     | The relative to the root link of the Text. |
|-- .Text     | The Name of the directory. |
| .Entries    | Information about a specific file/directory. |
|-- .URL      | The 'url' of an entry. |
|-- .Leaf     | Currently same as 'URL' but intended to be 'just' the name. |
|-- .IsDir    | Boolean for if an entry is a directory or not. |
|-- .Size     | Size in Bytes of the entry. |
|-- .ModTime  | The UTC timestamp of an entry. |
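A hedged illustration (not rclone code) of how a custom --template might consume the markup exported above; the entry struct and sample data are invented, and only .Title and .Entries of the listed parameters are exercised, in the same shape as the testindex.html golden file:

```go
package main

import (
	"html/template"
	"os"
)

// entry stands in for whatever rclone passes for each .Entries item.
type entry struct {
	URL, Leaf string
}

const tpl = `<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}`

func main() {
	t := template.Must(template.New("index").Parse(tpl))
	data := map[string]interface{}{
		"Title":   "Directory listing of /",
		"Entries": []entry{{URL: "two.txt", Leaf: "two.txt"}},
	}
	_ = t.Execute(os.Stdout, data)
}
```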

#### Authentication

By default this will serve files without needing a login.
@@ -101,6 +124,7 @@ type Options struct {
	BasicUser string // single username for basic auth if not using Htpasswd
	BasicPass string // password for BasicUser
	Auth AuthFn `json:"-"` // custom Auth (not set by command line flags)
	Template string // User specified template
}

// AuthFn if used will be used to authenticate user, pass. If an error
@@ -281,7 +305,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
		s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}

	htmlTemplate, templateErr := data.GetTemplate()
	htmlTemplate, templateErr := data.GetTemplate(s.Opt.Template)
	if templateErr != nil {
		log.Fatalf(templateErr.Error())
	}

File diff suppressed because one or more lines are too long.
Some files were not shown because too many files have changed in this diff.