Mirror of https://github.com/rclone/rclone.git (synced 2026-01-26 06:13:32 +00:00)

Compare commits: v1.44...fix-2926-p (213 commits)
Commits in this range (abbreviated SHA1):

5f37c6f258 a2341cc412 9685be64cd 39f5059d48 a30e80564d 8e107b9657 21a0693b79 4846d9393d fc4f20d52f 60558b5d37
5990573ccd bd11d3cb62 5e5578d2c3 1318c6aec8 f29757de3b f397c35935 f365230aea ff0b8e10af 8d16a5693c 781142a73f
f471a7e3f5 d7a1fd2a6b 7782eda88e d08453d402 71e98ea584 42d997f639 571b4c060b ff72059a94 2e6ef4f6ec 0ec6dd9f4b
0b7fdf16a2 5edfd31a6d 7ee7bc87ae 1433558c01 0458b961c5 c1998c4efe 49da220b65 554ee0d963 2d2533a08a 733b072d4f
2d01a65e36 b8280521a5 60e6af2605 9d16822c63 38a0946071 95e52e1ac3 51ab1c940a 6f30427357 3220acc729 3c97933416
039e2a9649 1c01d0b84a 39eac7a765 082a7065b1 f7b08a6982 37e32d8c80 f2a1b991de 4128e696d6 7e7f3de355 1f6a1cd26d
2cfe2354df 13387c0838 5babf2dc5c 9012d7c6c1 df1faa9a8f 3de7ad5223 9cb3a68c38 c1dd76788d 5ee1816a71 63b51c6742
e7684b7ed5 dda23baf42 8575abf599 feea0532cd d3e8ae1820 91a9a959a2 04eae51d11 8fb707e16d 4138d5aa75 fc654a4cec
26b5f55cba 3f572e6bf2 941ad6bc62 5d1d93e163 35fba5bfdd 887834da91 107293c80e e3c4ebd59a d99ffde7c0 198c34ce21
0eba88bbfe aeea4430d5 4b15c4215c 50452207d9 01fcad9b9c eb41253764 89625e54cf 58f7141c96 e56c6402a7 d0eb8ddc30
a6c28a5faa d35bd15762 8b8220c4f7 5fe3b0ad71 4c8c87a935 bb10a51b39 df01f7a4eb e84790ef79 369a8ee17b 84e21ade6b
703b0535a4 155264ae12 31e2ce03c3 e969505ae4 26e2f1a998 2682d5a9cf 2191592e80 18f758294e f95c1c61dd 8c8dcdd521
141c133818 0f03e55cd1 9e6ba92a11 762561f88e 084fe38922 63a2a935fc 64fce8438b f92beb4e14 f7ce2e8d95 3975d82b3b
d87aa33ec5 1b78f4d1ea b3704597f3 16f797a7d7 ee700ec01a 9b3c951ab7 22d17e79e3 6d3088a00b 84202c7471 96a05516f9
4f6a942595 c4b0a37b21 9322f4baef fa0a1e7261 4ad08794c9 c0f600764b f139e07380 c6786eeb2d 57b85b8155 2b1194c57e
e6dd121f52 e600217666 bc17ca7ed9 1916410316 dddfbec92a 75a88de55c 2466f4d152 39283c8a35 46c2f55545 fc2afcbcbd
fa0a9653d2 181267e20e 75e8ea383c 8c8b58a7de b961e07c57 0b80d1481a 89550e7121 370c218c63 b972dcb0ae 0bfa9811f7
aa9b2c31f4 cff75db6a4 75252e4a89 2089405e1b a379eec9d9 45d5339fcb bb5637d46a 1f05d5bf4a ff87da9c3b 3d81b75f44
baba6d67e6 04c0564fe2 91cfdb81f5 deae7bf33c 04a0da1f92 9486df0226 948a5d25c2 f7c31cd210 696e7b2833 e76cf1217f
543e37f662 c514cb752d c0ca93ae6f 38a89d49ae 6531126eb2 25d0e59ef8 b0db08fd2b 07addf74fd 52c7c738ca 5c32b32011
fe61cff079 fbab1e55bb 1bfd07567e f97c4c8d9d a3c55462a8 bbb9a504a8 dedc7d885c c5ac96e9e7 9959c5f17f e8d0a363fc
935b7c1c0f 15ce0ae57c 67703a73de
```diff
@@ -13,10 +13,10 @@ jobs:
     - run:
         name: Cross-compile rclone
         command: |
-          docker pull billziss/xgo-cgofuse
+          docker pull rclone/xgo-cgofuse
           go get -v github.com/karalabe/xgo
           xgo \
-              --image=billziss/xgo-cgofuse \
+              --image=rclone/xgo-cgofuse \
              --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              .
@@ -29,6 +29,19 @@ jobs:
        command: |
-          mkdir -p /tmp/rclone.dist
-          cp -R rclone-* /tmp/rclone.dist
+          mkdir build
+          cp -R rclone-* build/
+
+    - run:
+        name: Build rclone
+        command: |
+          go version
+          go build
+
+    - run:
+        name: Upload artifacts
+        command: |
+          make circleci_upload
 
-    - store_artifacts:
-        path: /tmp/rclone.dist
```
```diff
@@ -4,7 +4,6 @@ dist: trusty
 os:
   - linux
 go:
-  - 1.7.x
   - 1.8.x
   - 1.9.x
   - 1.10.x
```
```diff
@@ -123,6 +123,13 @@ but they can be run against any of the remotes.
     cd fs/operations
     go test -v -remote TestDrive:
 
+If you want to use the integration test framework to run these tests
+all together with an HTML report and test retries then from the
+project root:
+
+    go install github.com/ncw/rclone/fstest/test_all
+    test_all -backend drive
+
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run
 
@@ -343,7 +350,13 @@ Unit tests
 
 Integration tests
 
-  * Add your fs to `fstest/test_all/test_all.go`
+  * Add your backend to `fstest/test_all/config.yaml`
+  * Once you've done that then you can use the integration test framework from the project root:
+    * go install ./...
+    * test_all -backend remote
+
+Or if you want to run the integration tests manually:
+
   * Make sure integration tests pass with
     * `cd fs/operations`
     * `go test -v -remote TestRemote:`
@@ -365,4 +378,3 @@ Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](
   * `docs/content/about.md` - front page of rclone.org
   * `docs/layouts/chrome/navbar.html` - add it to the website navigation
   * `bin/make_manual.py` - add the page to the `docs` constant
   * `cmd/cmd.go` - the main help for rclone
```
```diff
@@ -1,14 +1,17 @@
 # Maintainers guide for rclone #
 
-Current active maintainers of rclone are
-
-  * Nick Craig-Wood @ncw
-  * Stefan Breunig @breunigs
-  * Ishuah Kariuki @ishuah
-  * Remus Bunduc @remusb - cache subsystem maintainer
-  * Fabian Möller @B4dM4n
-  * Alex Chen @Cnly
-  * Sandeep Ummadi @sandeepkru
+Current active maintainers of rclone are:
+
+| Name             | GitHub ID   | Specific Responsibilities    |
+| :--------------- | :---------- | :--------------------------- |
+| Nick Craig-Wood  | @ncw        | overall project health       |
+| Stefan Breunig   | @breunigs   |                              |
+| Ishuah Kariuki   | @ishuah     |                              |
+| Remus Bunduc     | @remusb     | cache backend                |
+| Fabian Möller    | @B4dM4n     |                              |
+| Alex Chen        | @Cnly       | onedrive backend             |
+| Sandeep Ummadi   | @sandeepkru | azureblob backend            |
+| Sebastian Bünger | @buengese   | jottacloud & yandex backends |
 
 **This is a work in progress Draft**
```
MANUAL.html (633 changes): file diff suppressed because it is too large.

MANUAL.txt (823 changes): file diff suppressed because it is too large.
Makefile (16 changes):

```diff
@@ -50,10 +50,9 @@ version:
 
 # Full suite of integration tests
 test:	rclone
-	go install github.com/ncw/rclone/fstest/test_all
-	-go test -v -count 1 -timeout 20m $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
-	@echo "Written logs in test.log and fs/test_all.log"
+	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+	-test_all 2>&1 | tee test_all.log
+	@echo "Written logs in test_all.log"
 
 # Quick test
 quicktest:
@@ -117,7 +116,7 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
 
 backenddocs: rclone bin/make_backend_docs.py
 	./bin/make_backend_docs.py
@@ -186,6 +185,13 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)
 
+circleci_upload:
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
+ifndef BRANCH_PATH
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
+endif
+	@echo Beta release ready at $(BETA_URL)/testbuilds
+
 BUILD_FLAGS := -exclude "^(windows|darwin)/"
 ifeq ($(TRAVIS_OS_NAME),osx)
 	BUILD_FLAGS := -include "^darwin/" -cgo
```
README.md (16 changes):

```diff
@@ -20,6 +20,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Storage providers
 
+  * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
   * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
   * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
   * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -55,6 +56,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
   * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
   * The local filesystem [:page_facing_up:](https://rclone.org/local/)
+
+Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
 
 ## Features
 
@@ -71,10 +74,15 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Installation & documentation
 
-Please see the rclone website for installation, usage, documentation,
-changelog and configuration walkthroughs.
-
-  * https://rclone.org/
+Please see the [rclone website](https://rclone.org/) for:
+
+  * [Installation](https://rclone.org/install/)
+  * [Documentation & configuration](https://rclone.org/docs/)
+  * [Changelog](https://rclone.org/changelog/)
+  * [FAQ](https://rclone.org/faq/)
+  * [Storage providers](https://rclone.org/overview/)
+  * [Forum](https://forum.rclone.org/)
+  * ...and more
 
 ## Downloads
 
@@ -84,4 +92,4 @@ License
 -------
 
 This is free software under the terms of MIT the license (check the
-[COPYING file](/rclone/COPYING) included in this package).
+[COPYING file](/COPYING) included in this package).
```
RELEASE.md (15 changes):

````diff
@@ -32,6 +32,21 @@ Early in the next release cycle update the vendored dependencies
   * git add new files
   * git commit -a -v
 
+If `make update` fails with errors like this:
+
+```
+# github.com/cpuguy83/go-md2man/md2man
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
+```
+
+Can be fixed with
+
+  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
+  * GO111MODULE=on go mod tidy
+  * GO111MODULE=on go mod vendor
+
 Making a point release. If rclone needs a point release due to some
 horrendous bug, then
   * git branch v1.XX v1.XX-fixes
````
```diff
@@ -21,7 +21,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/go-acd"
+	acd "github.com/ncw/go-acd"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
 	"github.com/ncw/rclone/fs/config/configmap"
@@ -264,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
 	if err != nil {
-		log.Fatalf("Failed to configure Amazon Drive: %v", err)
+		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
 	}
 
 	c := acd.NewClient(oAuthClient)
```
```diff
@@ -1,6 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
 
@@ -22,12 +22,14 @@ import (
 	"sync"
 	"time"
 
-	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/accounting"
 	"github.com/ncw/rclone/fs/config/configmap"
 	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
 	"github.com/ncw/rclone/lib/pacer"
@@ -50,6 +52,7 @@ const (
 	defaultUploadCutoff = 256 * fs.MebiByte
 	maxUploadCutoff     = 256 * fs.MebiByte
 	defaultAccessTier   = azblob.AccessTierNone
+	maxTryTimeout       = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
 )
 
 // Register with Fs
@@ -134,6 +137,7 @@ type Fs struct {
 	root         string               // the path we are working on if any
 	opt          Options              // parsed config options
 	features     *fs.Features         // optional features
+	client       *http.Client         // http client we are using
 	svcURL       *azblob.ServiceURL   // reference to serviceURL
 	cntURL       *azblob.ContainerURL // reference to containerURL
 	container    string               // the container we are working on
@@ -271,6 +275,38 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+// httpClientFactory creates a Factory object that sends HTTP requests
+// to a rclone's http.Client.
+//
+// copied from azblob.newDefaultHTTPClientFactory
+func httpClientFactory(client *http.Client) pipeline.Factory {
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			r, err := client.Do(request.WithContext(ctx))
+			if err != nil {
+				err = pipeline.NewError(err, "HTTP request failed")
+			}
+			return pipeline.NewHTTPResponse(r), err
+		}
+	})
+}
+
+// newPipeline creates a Pipeline using the specified credentials and options.
+//
+// this code was copied from azblob.NewPipeline
+func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
+	// Closest to API goes first; closest to the wire goes last
+	factories := []pipeline.Factory{
+		azblob.NewTelemetryPolicyFactory(o.Telemetry),
+		azblob.NewUniqueRequestIDPolicyFactory(),
+		azblob.NewRetryPolicyFactory(o.Retry),
+		c,
+		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
+		azblob.NewRequestLogPolicyFactory(o.RequestLog),
+	}
+	return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
+}
+
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -306,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}
 
+	f := &Fs{
+		name:        name,
+		opt:         *opt,
+		container:   container,
+		root:        directory,
+		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		client:      fshttp.NewClient(fs.Config),
+	}
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+		SetTier:       true,
+		GetTier:       true,
+	}).Fill(f)
+
 	var (
 		u          *url.URL
 		serviceURL azblob.ServiceURL
@@ -322,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
 		}
-		pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 		containerURL = serviceURL.NewContainerURL(container)
 	case opt.SASURL != "":
@@ -331,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			return nil, errors.Wrapf(err, "failed to parse SAS URL")
 		}
 		// use anonymous credentials in case of sas url
-		pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
+		pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		// Check if we have container level SAS or account level sas
 		parts := azblob.NewBlobURLParts(*u)
 		if parts.ContainerName != "" {
@@ -348,24 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	default:
 		return nil, errors.New("Need account+key or connectionString or sasURL")
 	}
+	f.svcURL = &serviceURL
+	f.cntURL = &containerURL
 
-	f := &Fs{
-		name:        name,
-		opt:         *opt,
-		container:   container,
-		root:        directory,
-		svcURL:      &serviceURL,
-		cntURL:      &containerURL,
-		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-	}
-	f.features = (&fs.Features{
-		ReadMimeType:  true,
-		WriteMimeType: true,
-		BucketBased:   true,
-		SetTier:       true,
-		GetTier:       true,
-	}).Fill(f)
 	if f.root != "" {
 		f.root += "/"
 		// Check to see if the (container,directory) is actually an existing file
@@ -379,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		_, err := f.NewObject(remote)
 		if err != nil {
-			if err == fs.ErrorObjectNotFound {
-				// File doesn't exist so return old f
+			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
+				// File doesn't exist or is a directory so return old f
 				f.root = oldRoot
 				return f, nil
 			}
@@ -436,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
 	o.meta[modTimeKey] = modTime.Format(timeFormatOut)
 }
 
+// Returns whether file is a directory marker or not
+func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
+	// Directory markers are 0 length
+	if size == 0 {
+		// Note that metadata with hdi_isfolder = true seems to be a
+		// defacto standard for marking blobs as directories.
+		endsWithSlash := strings.HasSuffix(remote, "/")
+		if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
+			return true
+		}
+	}
+	return false
+}
+
 // listFn is called from list to handle an object
 type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
 
@@ -471,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 		MaxResults: int32(maxResults),
 	}
 	ctx := context.Background()
+	directoryMarkers := map[string]struct{}{}
 	for marker := (azblob.Marker{}); marker.NotDone(); {
 		var response *azblob.ListBlobsHierarchySegmentResponse
 		err := f.pacer.Call(func() (bool, error) {
@@ -500,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 			continue
 		}
 		remote := file.Name[len(f.root):]
-		// Check for directory
-		isDirectory := strings.HasSuffix(remote, "/")
-		if isDirectory {
-			remote = remote[:len(remote)-1]
+		if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
+			if strings.HasSuffix(remote, "/") {
+				remote = remote[:len(remote)-1]
+			}
+			err = fn(remote, file, true)
+			if err != nil {
+				return err
+			}
+			// Keep track of directory markers. If recursing then
+			// there will be no Prefixes so no need to keep track
+			if !recurse {
+				directoryMarkers[remote] = struct{}{}
+			}
+			continue // skip directory marker
 		}
 		// Send object
-		err = fn(remote, file, isDirectory)
+		err = fn(remote, file, false)
 		if err != nil {
 			return err
 		}
@@ -519,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 			continue
 		}
 		remote = remote[len(f.root):]
+		// Don't send if already sent as a directory marker
+		if _, found := directoryMarkers[remote]; found {
+			continue
+		}
 		// Send object
 		err = fn(remote, nil, true)
 		if err != nil {
@@ -705,6 +773,11 @@ func (f *Fs) Mkdir(dir string) error {
 			f.containerOK = true
 			return false, nil
 		case azblob.ServiceCodeContainerBeingDeleted:
+			// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
+			// When a container is deleted, a container with the same name cannot be created
+			// for at least 30 seconds; the container may not be available for more than 30
+			// seconds if the service is still processing the request.
+			time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
 			f.containerDeleted = true
 			return true, err
 		}
@@ -722,7 +795,7 @@ func (f *Fs) Mkdir(dir string) error {
 // isEmpty checks to see if a given directory is empty and returns an error if not
 func (f *Fs) isEmpty(dir string) (err error) {
 	empty := true
-	err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 		empty = false
 		return nil
 	})
@@ -917,27 +990,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
 // o.md5
 // o.meta
 func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
+	metadata := info.NewMetadata()
+	size := info.ContentLength()
+	if isDirectoryMarker(size, metadata, o.remote) {
+		return fs.ErrorNotAFile
+	}
 	// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
 	// this as base64 encoded string.
 	o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
 	o.mimeType = info.ContentType()
-	o.size = info.ContentLength()
+	o.size = size
 	o.modTime = time.Time(info.LastModified())
 	o.accessTier = azblob.AccessTierType(info.AccessTier())
-	o.setMetadata(info.NewMetadata())
+	o.setMetadata(metadata)
 
 	return nil
 }
 
 func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
+	metadata := info.Metadata
+	size := *info.Properties.ContentLength
+	if isDirectoryMarker(size, metadata, o.remote) {
+		return fs.ErrorNotAFile
+	}
	// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
 	// this as base64 encoded string.
 	o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
 	o.mimeType = *info.Properties.ContentType
-	o.size = *info.Properties.ContentLength
+	o.size = size
 	o.modTime = info.Properties.LastModified
 	o.accessTier = info.Properties.AccessTier
-	o.setMetadata(info.Metadata)
+	o.setMetadata(metadata)
 	return nil
 }
 
@@ -1368,7 +1451,7 @@ func (o *Object) SetTier(tier string) error {
 	blob := o.getBlobReference()
 	ctx := context.Background()
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blob.SetTier(ctx, desiredAccessTier)
+		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
 		return o.fs.shouldRetry(err)
 	})
```
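To make the directory-marker convention concrete, here is a standalone Go sketch of the `isDirectoryMarker` logic added above, with `azblob.Metadata` replaced by its underlying map type and made-up example values (mine, not from the diff): a blob counts as a directory marker only when it is zero length and either ends in a slash or carries the de-facto `hdi_isfolder` metadata.

```go
package main

import (
	"fmt"
	"strings"
)

// Standalone copy of the isDirectoryMarker logic from the diff above.
func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
	if size == 0 {
		endsWithSlash := strings.HasSuffix(remote, "/")
		if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
			return true
		}
	}
	return false
}

func main() {
	meta := map[string]string{"hdi_isfolder": "true"}
	fmt.Println(isDirectoryMarker(0, meta, "photos"))  // true: zero length + hdi_isfolder metadata
	fmt.Println(isDirectoryMarker(0, nil, "photos/"))  // true: zero length + trailing slash
	fmt.Println(isDirectoryMarker(42, meta, "photos")) // false: a non-empty blob is never a marker
}
```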
```diff
@@ -1,4 +1,4 @@
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
 
@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface
 
-// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
+// +build !plan9,!solaris,go1.8
 
 package azureblob
 
@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build freebsd netbsd openbsd plan9 solaris !go1.8
+// +build plan9 solaris !go1.8
 
 package azureblob
```
```diff
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
 	Allowed   struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
 		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
+		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
 		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
 		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
@@ -368,6 +368,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
+	// If this is a key limited to a single bucket, it must exist already
 	if f.bucket != "" && f.info.Allowed.BucketID != "" {
+		allowedBucket := f.info.Allowed.BucketName
+		if allowedBucket == "" {
+			return nil, errors.New("bucket that application key is restricted to no longer exists")
+		}
+		if allowedBucket != f.bucket {
+			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+		}
 		f.markBucketOK()
 		f.setBucketID(f.info.Allowed.BucketID)
 	}
@@ -980,6 +987,12 @@ func (f *Fs) purge(oldOnly bool) error {
 			errReturn = err
 		}
 	}
+	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
+		if time.Since(time.Time(timestamp)).Hours() > 24 {
+			return true
+		}
+		return false
+	}
 
 	// Delete Config.Transfers in parallel
 	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -1003,6 +1016,9 @@ func (f *Fs) purge(oldOnly bool) error {
 				if object.Action == "hide" {
 					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
 					toBeDeleted <- object
+				} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
+					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
+					toBeDeleted <- object
 				} else {
 					fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
 				}
```
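Since `api.Timestamp` converts to `time.Time`, the staleness closure above boils down to a single duration comparison. An equivalent, simpler formulation (my paraphrase, not the project's code) that avoids the floating-point `Hours()` comparison:

```go
package main

import (
	"fmt"
	"time"
)

// isStale reports whether an unfinished upload is more than 24 hours old.
func isStale(uploadStarted time.Time) bool {
	return time.Since(uploadStarted) > 24*time.Hour
}

func main() {
	fmt.Println(isStale(time.Now().Add(-25 * time.Hour))) // true
	fmt.Println(isStale(time.Now()))                      // false
}
```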
```diff
@@ -252,7 +252,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	root = parsePath(root)
 	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
-		log.Fatalf("Failed to configure Box: %v", err)
+		return nil, errors.Wrap(err, "failed to configure Box")
 	}
 
 	f := &Fs{
```
backend/cache/cache.go (vendored, 2 changes):

```diff
@@ -471,7 +471,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
 	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
 	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
-	if !f.opt.StoreWrites {
+	if f.opt.StoreWrites {
 		fs.Infof(name, "Cache Writes: enabled")
 	}
```
backend/cache/cache_internal_test.go (vendored, 26 changes):

```diff
@@ -5,14 +5,12 @@ package cache_test
 import (
 	"bytes"
 	"encoding/base64"
-	"encoding/json"
 	goflag "flag"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"math/rand"
-	"net/http"
 	"os"
 	"path"
 	"path/filepath"
@@ -32,11 +30,11 @@ import (
 	"github.com/ncw/rclone/fs/config/configmap"
 	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/rc"
 	"github.com/ncw/rclone/fs/rc/rcflags"
 	"github.com/ncw/rclone/fstest"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -692,8 +690,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 }
 
 func TestInternalChangeSeenAfterRc(t *testing.T) {
+	rcflags.Opt.Enabled = true
+	rc.Start(&rcflags.Opt)
+	cacheExpire := rc.Calls.Get("cache/expire")
+	assert.NotNil(t, cacheExpire)
 
 	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -726,13 +724,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	require.NoError(t, err)
 	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
 
-	m := make(map[string]string)
-	res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
-	require.NoError(t, err)
-	defer func() {
-		_ = res.Body.Close()
-	}()
-	_ = json.NewDecoder(res.Body).Decode(&m)
+	// Call the rc function
+	m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
 	require.Contains(t, m, "status")
 	require.Contains(t, m, "message")
 	require.Equal(t, "ok", m["status"])
@@ -752,13 +745,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	li1, err = runInstance.list(t, rootFs, "")
 	require.Len(t, li1, 1)
 
-	m = make(map[string]string)
-	res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
-	require.NoError(t, err)
-	defer func() {
-		_ = res2.Body.Close()
-	}()
-	_ = json.NewDecoder(res2.Body).Decode(&m)
+	// Call the rc function
+	m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
 	require.Contains(t, m, "status")
 	require.Contains(t, m, "message")
 	require.Equal(t, "ok", m["status"])
```
backend/cache/plex.go (vendored, 2 changes):

```diff
@@ -15,7 +15,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
 	"golang.org/x/net/websocket"
 )
```
backend/cache/storage_memory.go (vendored, 2 changes):

```diff
@@ -8,7 +8,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/patrickmn/go-cache"
+	cache "github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
 )
```
```diff
@@ -41,6 +41,7 @@ var (
 	ErrorBadDecryptControlChar   = errors.New("bad decryption - contains control chars")
 	ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
 	ErrorTooShortAfterDecode     = errors.New("too short after base32 decode")
+	ErrorTooLongAfterDecode      = errors.New("too long after base32 decode")
 	ErrorEncryptedFileTooShort   = errors.New("file is too short to be encrypted")
 	ErrorEncryptedFileBadHeader  = errors.New("file has truncated block header")
 	ErrorEncryptedBadMagic       = errors.New("not an encrypted file - bad magic string")
@@ -284,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 		// not possible if decodeFilename() working correctly
 		return "", ErrorTooShortAfterDecode
 	}
+	if len(rawCiphertext) > 2048 {
+		return "", ErrorTooLongAfterDecode
+	}
 	paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
 	plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
 	if err != nil {
```
```diff
@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
 
 func TestDecryptSegment(t *testing.T) {
 	// We've tested the forwards above, now concentrate on the errors
+	longName := make([]byte, 3328)
+	for i := range longName {
+		longName[i] = 'a'
+	}
 	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in string
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
 	}{
 		{"64=", ErrorBadBase32Encoding},
 		{"!", base32.CorruptInputError(0)},
+		{string(longName), ErrorTooLongAfterDecode},
 		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
```
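A quick sanity check on those numbers (my arithmetic, not from the diff): base32 encodes every 5 plaintext bytes as 8 characters, so the 3328-character `longName` in the test decodes to 3328 / 8 * 5 = 2080 bytes, just over the new 2048-byte cap, which is exactly what makes it trip `ErrorTooLongAfterDecode`:

```go
package main

import "fmt"

func main() {
	const encodedLen = 3328              // length of longName in the test
	decoded := encodedLen / 8 * 5        // base32: 8 chars decode to 5 bytes
	fmt.Println(decoded, decoded > 2048) // 2080 true -> exceeds the 2048-byte cap
}
```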
```diff
@@ -7,13 +7,30 @@ import (
 	"testing"
 
 	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive" // for integration tests
 	_ "github.com/ncw/rclone/backend/local"
+	_ "github.com/ncw/rclone/backend/swift" // for integration tests
 	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fstest"
 	"github.com/ncw/rclone/fstest/fstests"
 )
 
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		t.Skip("Skipping as -remote not set")
+	}
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: *fstest.RemoteName,
+		NilObject:  (*crypt.Object)(nil),
+	})
+}
+
 // TestStandard runs integration tests against the remote
 func TestStandard(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
 	name := "TestCrypt"
 	fstests.Run(t, &fstests.Opt{
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {
 
 // TestOff runs integration tests against the remote
 func TestOff(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
 	name := "TestCrypt2"
 	fstests.Run(t, &fstests.Opt{
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {
 
 // TestObfuscate runs integration tests against the remote
 func TestObfuscate(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
 	name := "TestCrypt3"
 	fstests.Run(t, &fstests.Opt{
```
```diff
@@ -1,4 +1,7 @@
 // Package drive interfaces with the Google Drive object storage system
 
+// +build go1.9
+
 package drive
 
 // FIXME need to deal with some corner cases
@@ -122,6 +125,29 @@ var (
 	_linkTemplates map[string]*template.Template // available link types
 )
 
+// Parse the scopes option returning a slice of scopes
+func driveScopes(scopesString string) (scopes []string) {
+	if scopesString == "" {
+		scopesString = defaultScope
+	}
+	for _, scope := range strings.Split(scopesString, ",") {
+		scope = strings.TrimSpace(scope)
+		scopes = append(scopes, scopePrefix+scope)
+	}
+	return scopes
+}
+
+// Returns true if one of the scopes was "drive.appfolder"
+func driveScopesContainsAppFolder(scopes []string) bool {
+	for _, scope := range scopes {
+		if scope == scopePrefix+"drive.appfolder" {
+			return true
+		}
+	}
+	return false
+}
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -136,18 +162,14 @@ func init() {
 				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
 				return
 			}
-			// Fill in the scopes
-			if opt.Scope == "" {
-				opt.Scope = defaultScope
-			}
-			driveConfig.Scopes = nil
-			for _, scope := range strings.Split(opt.Scope, ",") {
-				driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
-				// Set the root_folder_id if using drive.appfolder
-				if scope == "drive.appfolder" {
-					m.Set("root_folder_id", "appDataFolder")
-				}
-			}
+			driveConfig.Scopes = driveScopes(opt.Scope)
+			// Set the root_folder_id if using drive.appfolder
+			if driveScopesContainsAppFolder(driveConfig.Scopes) {
+				m.Set("root_folder_id", "appDataFolder")
+			}
+
 			if opt.ServiceAccountFile == "" {
 				err = oauthutil.Config("drive", name, m, driveConfig)
 				if err != nil {
@@ -696,12 +718,16 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 
 // Figure out if the user wants to use a team drive
 func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
+	// Stop if we are running non-interactive config
+	if fs.Config.AutoConfirm {
+		return nil
+	}
 	if opt.TeamDriveID == "" {
 		fmt.Printf("Configure this as a team drive?\n")
 	} else {
 		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
 	}
-	if !config.ConfirmWithDefault(false) {
+	if !config.Confirm() {
 		return nil
 	}
 	client, err := createOAuthClient(opt, name, m)
@@ -753,7 +779,8 @@ func newPacer() *pacer.Pacer {
 }
 
 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
-	conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
+	scopes := driveScopes(opt.Scope)
+	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
```
```diff
@@ -1,3 +1,5 @@
+// +build go1.9
+
 package drive
 
 import (
@@ -20,6 +22,31 @@ import (
 	"google.golang.org/api/drive/v3"
 )
 
+func TestDriveScopes(t *testing.T) {
+	for _, test := range []struct {
+		in       string
+		want     []string
+		wantFlag bool
+	}{
+		{"", []string{
+			"https://www.googleapis.com/auth/drive",
+		}, false},
+		{" drive.file , drive.readonly", []string{
+			"https://www.googleapis.com/auth/drive.file",
+			"https://www.googleapis.com/auth/drive.readonly",
+		}, false},
+		{" drive.file , drive.appfolder", []string{
+			"https://www.googleapis.com/auth/drive.file",
+			"https://www.googleapis.com/auth/drive.appfolder",
+		}, true},
+	} {
+		got := driveScopes(test.in)
+		assert.Equal(t, test.want, got, test.in)
+		gotFlag := driveScopesContainsAppFolder(got)
+		assert.Equal(t, test.wantFlag, gotFlag, test.in)
+	}
+}
+
 /*
 var additionalMimeTypes = map[string]string{
 	"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
@@ -243,10 +270,19 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
 }
 
 func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("DocumentImport", f.InternalTestDocumentImport)
-	t.Run("DocumentUpdate", f.InternalTestDocumentUpdate)
-	t.Run("DocumentExport", f.InternalTestDocumentExport)
-	t.Run("DocumentLink", f.InternalTestDocumentLink)
+	// These tests all depend on each other so run them as nested tests
+	t.Run("DocumentImport", func(t *testing.T) {
+		f.InternalTestDocumentImport(t)
+		t.Run("DocumentUpdate", func(t *testing.T) {
+			f.InternalTestDocumentUpdate(t)
+			t.Run("DocumentExport", func(t *testing.T) {
+				f.InternalTestDocumentExport(t)
+				t.Run("DocumentLink", func(t *testing.T) {
+					f.InternalTestDocumentLink(t)
+				})
+			})
+		})
+	})
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
```
```diff
@@ -1,4 +1,7 @@
 // Test Drive filesystem interface
 
+// +build go1.9
+
 package drive
 
 import (
```

backend/drive/drive_unsupported.go (6 changes, new file):

```diff
@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package drive
```

```diff
@@ -8,6 +8,8 @@
 //
 // This contains code adapted from google.golang.org/api (C) the GO AUTHORS
 
+// +build go1.9
+
 package drive
 
 import (
```
```diff
@@ -31,9 +31,11 @@ import (
 	"time"
 
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
@@ -131,13 +133,19 @@ slightly (at most 10%% for 128MB in tests) at the cost of using more
 memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
 			Default:  fs.SizeSuffix(defaultChunkSize),
 			Advanced: true,
+		}, {
+			Name:     "impersonate",
+			Help:     "Impersonate this user when using a business account.",
+			Default:  "",
+			Advanced: true,
 		}},
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
-	ChunkSize fs.SizeSuffix `config:"chunk_size"`
+	ChunkSize   fs.SizeSuffix `config:"chunk_size"`
+	Impersonate string        `config:"impersonate"`
 }
 
 // Fs represents a remote dropbox server
@@ -149,6 +157,7 @@ type Fs struct {
 	srv            files.Client   // the connection to the dropbox server
 	sharing        sharing.Client // as above, but for generating sharing links
 	users          users.Client   // as above, but for accessing user information
+	team           team.Client    // for the Teams API
 	slashRoot      string         // root with "/" prefix, lowercase
 	slashRootSlash string         // root with "/" prefix and postfix, lowercase
 	pacer          *pacer.Pacer   // To pace the API calls
@@ -195,7 +204,16 @@ func shouldRetry(err error) (bool, error) {
 		return false, err
 	}
 	baseErrString := errors.Cause(err).Error()
-	// FIXME there is probably a better way of doing this!
+	// handle any official Retry-After header from Dropbox's SDK first
+	switch e := err.(type) {
+	case auth.RateLimitAPIError:
+		if e.RateLimitError.RetryAfter > 0 {
+			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+			time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+		}
+		return true, err
+	}
+	// Keep old behaviour for backward compatibility
 	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
 		return true, err
 	}
@@ -262,6 +280,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		Client:          oAuthClient, // maybe???
 		HeaderGenerator: f.headerGenerator,
 	}
+
+	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
+	f.team = team.New(config)
+
+	if opt.Impersonate != "" {
+
+		user := team.UserSelectorArg{
+			Email: opt.Impersonate,
+		}
+		user.Tag = "email"
+
+		members := []*team.UserSelectorArg{&user}
+		args := team.NewMembersGetInfoArgs(members)
+
+		memberIds, err := f.team.MembersGetInfo(args)
+
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
+		}
+
+		config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
+	}
+
 	f.srv = files.New(config)
 	f.sharing = sharing.New(config)
 	f.users = users.New(config)
```
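For context, a backend option registered this way surfaces as a key in the rclone config file. A hypothetical remote using the new option might look like this (remote name and email are placeholders; the OAuth token line is omitted):

```ini
[dropbox-team]
type = dropbox
impersonate = colleague@example.com
```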
```diff
@@ -646,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
 
 // Close the FTP reader and return the connection to the pool
 func (f *ftpReadCloser) Close() error {
-	err := f.rc.Close()
+	var err error
+	errchan := make(chan error, 1)
+	go func() {
+		errchan <- f.rc.Close()
+	}()
+	// Wait for Close for up to 60 seconds
+	timer := time.NewTimer(60 * time.Second)
+	select {
+	case err = <-errchan:
+		timer.Stop()
+	case <-timer.C:
+		// if timer fired assume no error but connection dead
+		fs.Errorf(f.f, "Timeout when waiting for connection Close")
+		return nil
+	}
 	// if errors while reading or closing, dump the connection
 	if err != nil || f.err != nil {
 		_ = f.c.Quit()
```
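The shape of that fix, racing a blocking Close against a timer so a dead server cannot hang the caller, is a general pattern. A minimal standalone sketch (all names mine, not rclone's); the buffered channel lets the goroutine deliver its result and exit even when the timeout wins the race:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// closeWithTimeout runs c.Close in a goroutine and gives up after d.
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errchan := make(chan error, 1)
	go func() { errchan <- c.Close() }()
	select {
	case err := <-errchan:
		return err
	case <-time.After(d):
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	rc := io.NopCloser(strings.NewReader("data"))
	fmt.Println(closeWithTimeout(rc, 60*time.Second)) // <nil>
}
```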
```diff
@@ -1,4 +1,7 @@
 // Package googlecloudstorage provides an interface to Google Cloud Storage
 
+// +build go1.9
+
 package googlecloudstorage
 
 /*
@@ -1,4 +1,7 @@
 // Test GoogleCloudStorage filesystem interface
 
+// +build go1.9
+
 package googlecloudstorage_test
 
 import (
@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package googlecloudstorage
```
```diff
@@ -193,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	}
 	err := o.stat()
 	if err != nil {
-		return nil, errors.Wrap(err, "Stat failed")
+		return nil, err
 	}
 	return o, nil
 }
@@ -416,6 +416,9 @@ func (o *Object) url() string {
 func (o *Object) stat() error {
 	url := o.url()
 	res, err := o.fs.httpClient.Head(url)
+	if err == nil && res.StatusCode == http.StatusNotFound {
+		return fs.ErrorObjectNotFound
+	}
 	err = statusError(res, err)
 	if err != nil {
 		return errors.Wrap(err, "failed to stat")
@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {
 
 	dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
 	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
+
+	// check object not found
+	o, err = f.NewObject("not found.txt")
+	assert.Nil(t, o)
+	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
 func TestOpen(t *testing.T) {
```
```diff
@@ -464,12 +464,12 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
 	if folder.Deleted {
 		return nil
 	}
-	folderPath := path.Join(folder.Path, folder.Name)
-	remoteDirLength := len(folderPath) - pathPrefixLength
+	folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
+	folderPathLength := len(folderPath)
 	var remoteDir string
-	if remoteDirLength > 0 {
-		remoteDir = restoreReservedChars(folderPath[pathPrefixLength+1:])
-		if remoteDirLength > startPathLength {
+	if folderPathLength > pathPrefixLength {
+		remoteDir = folderPath[pathPrefixLength+1:]
+		if folderPathLength > startPathLength {
 			d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
 			err := fn(d)
 			if err != nil {
```
```diff
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -21,6 +22,7 @@ import (
 	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/file"
 	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 )
@@ -651,7 +653,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 	o.fs.objectHashesMu.Unlock()
 
 	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
-		in, err := os.Open(o.path)
+		in, err := file.Open(o.path)
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to open")
 		}
@@ -741,6 +743,7 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
 	}
 
 	n, err = file.in.Read(p)
+	log.Printf("*** Read result n=%d, err=%v, len(p)=%d", n, err, len(p))
 	if n > 0 {
 		// Hash routines never return an error
 		_, _ = file.hash.Write(p[:n])
@@ -780,7 +783,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		}
 	}
 
-	fd, err := os.Open(o.path)
+	fd, err := file.Open(o.path)
 	if err != nil {
 		return
 	}
@@ -826,7 +829,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		return err
 	}
 
-	out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
 	if err != nil {
 		return err
 	}
```
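The lib/file package substituted for os here is not shown in this capture; presumably it is a thin wrapper whose point is platform-specific open behaviour (long or UNC paths on Windows would be the obvious candidate). A guess at its shape, clearly hypothetical and not rclone's actual implementation:

```go
// Hypothetical sketch of a lib/file-style wrapper.
package file

import (
	"os"
	"runtime"
	"strings"
)

// Open is a drop-in replacement for os.Open. On Windows it could extend
// the path into the \\?\ long-path form before opening; elsewhere it is
// just os.Open.
func Open(name string) (*os.File, error) {
	if runtime.GOOS == "windows" && !strings.HasPrefix(name, `\\?\`) {
		// A real implementation would absolutize and normalize first.
		name = `\\?\` + name
	}
	return os.Open(name)
}
```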
```diff
@@ -1,13 +1,13 @@
 package local
 
 import (
-	"os"
 	"path"
 	"testing"
 	"time"
 
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/lib/file"
 	"github.com/ncw/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -38,7 +38,7 @@ func TestUpdatingCheck(t *testing.T) {
 	filePath := "sub dir/local test"
 	r.WriteFile(filePath, "content", time.Now())
 
-	fd, err := os.Open(path.Join(r.LocalName, filePath))
+	fd, err := file.Open(path.Join(r.LocalName, filePath))
 	if err != nil {
 		t.Fatalf("failed opening file %q: %v", filePath, err)
 	}
```
```diff
@@ -285,6 +285,7 @@ type AsyncOperationStatus struct {
 
 // GetID returns a normalized ID of the item
+// If DriveID is known it will be prefixed to the ID with # seperator
+// Can be parsed using onedrive.parseNormalizedID(normalizedID)
 func (i *Item) GetID() string {
 	if i.IsRemote() && i.RemoteItem.ID != "" {
 		return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
```
@@ -75,9 +75,8 @@ func init() {
|
||||
return
|
||||
}
|
||||
|
||||
// Are we running headless?
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
// Stop if we are running non-interactive config
|
||||
if fs.Config.AutoConfirm {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -199,7 +198,7 @@ func init() {
|
||||
|
||||
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
|
||||
// This does not work, YET :)
|
||||
if !config.Confirm() {
|
||||
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
|
||||
log.Fatalf("Cancelled by user")
|
||||
}
|
||||
|
||||
@@ -334,20 +333,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
|
||||
var opts rest.Opts
|
||||
if len(path) == 0 {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
|
||||
}
|
||||
}
|
||||
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
|
||||
// if `relPath` == "", it reads the metadata for the item with that ID.
|
||||
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
|
||||
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -356,6 +345,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
	return info, resp, err
}

// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		var opts rest.Opts
		if len(path) == 0 {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root",
			}
		} else {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
			}
		}
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &info)
			return shouldRetry(resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		var ok bool
		if rootNormalizedID, ok = f.dirCache.Get(""); ok {
			dirCacheFoundRoot = true
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}

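// Added walk-through (illustrative, not part of the diff), assuming a
// hypothetical layout where a sharer's folder "Shared" sits in the sharee's
// root and contains "docs/a.txt": resolving "Shared/docs/a.txt" on a
// personal drive cannot use a plain /root:/ path, so the branch above first
// resolves "Shared" to its normalized ID (sharerDriveID#folderID) and then
// reads "docs/a.txt" relative to that ID via readMetaDataForPathRelativeToID.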
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
@@ -404,13 +459,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}

	if opt.DriveID == "" || opt.DriveType == "" {
		log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure OneDrive: %v", err)
		return nil, errors.Wrap(err, "failed to configure OneDrive")
	}

	f := &Fs{
@@ -514,18 +569,11 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
	parent, ok := f.dirCache.GetInv(pathID)
	_, ok := f.dirCache.GetInv(pathID)
	if !ok {
		return "", false, errors.New("couldn't find parent ID")
	}
	path := leaf
	if parent != "" {
		path = parent + "/" + path
	}
	if f.dirCache.FoundRoot() {
		path = f.rootSlash() + path
	}
	info, resp, err := f.readMetaDataForPath(path)
	info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
	if err != nil {
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return "", false, nil
@@ -867,13 +915,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
	opts.NoResponse = true

	id, _, _ := parseDirID(directoryID)
	id, dstDriveID, _ := parseNormalizedID(directoryID)

	replacedLeaf := replaceReservedChars(leaf)
	copyReq := api.CopyItemRequest{
		Name: &replacedLeaf,
		ParentReference: api.ItemReference{
			DriveID: f.driveID,
			DriveID: dstDriveID,
			ID:      id,
		},
	}
@@ -940,15 +988,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
		return nil, err
	}

	id, dstDriveID, _ := parseNormalizedID(directoryID)
	_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)

	if dstDriveID != srcObjDriveID {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		return nil, fs.ErrorCantMove
	}

	// Move the object
	opts := newOptsCall(srcObj.id, "PATCH", "")

	id, _, _ := parseDirID(directoryID)

	move := api.MoveItemRequest{
		Name: replaceReservedChars(leaf),
		ParentReference: &api.ItemReference{
			ID: id,
			DriveID: dstDriveID,
			ID:      id,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1024,7 +1080,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	if err != nil {
		return err
	}
	parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
	if err != nil {
		return err
	}
	_, srcDriveID, _ := parseNormalizedID(srcID)

	if dstDriveID != srcDriveID {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		return fs.ErrorCantDirMove
	}

	// Check destination does not exist
	if dstRemote != "" {
@@ -1038,14 +1107,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
		}
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
	if err != nil {
		return err
	}

	// Get timestamps of src so they can be preserved
	srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
	if err != nil {
		return err
	}
@@ -1055,7 +1118,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	move := api.MoveItemRequest{
		Name: replaceReservedChars(leaf),
		ParentReference: &api.ItemReference{
			ID: parsedDstDirID,
			DriveID: dstDriveID,
			ID:      parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1122,7 +1186,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
	if err != nil {
		return "", err
	}
	opts := newOptsCall(info.ID, "POST", "/createLink")
	opts := newOptsCall(info.GetID(), "POST", "/createLink")

	share := api.CreateShareLinkRequest{
		Type: "view",
@@ -1270,13 +1334,13 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
	var opts rest.Opts
	_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	_, drive, rootURL := parseDirID(directoryID)
	leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
	if drive != "" {
		opts = rest.Opts{
			Method:  "PATCH",
			RootURL: rootURL,
			Path:    "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
			Path:    "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
		}
	} else {
		opts = rest.Opts{
@@ -1344,7 +1408,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
	leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	id, drive, rootURL := parseDirID(directoryID)
	id, drive, rootURL := parseNormalizedID(directoryID)
	var opts rest.Opts
	if drive != "" {
		opts = rest.Opts{
@@ -1477,13 +1541,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
	fs.Debugf(o, "Starting singlepart upload")
	var resp *http.Response
	var opts rest.Opts
	_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	_, drive, rootURL := parseDirID(directoryID)
	leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
	if drive != "" {
		opts = rest.Opts{
			Method:        "PUT",
			RootURL:       rootURL,
			Path:          "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
			Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
			ContentLength: &size,
			Body:          in,
		}
@@ -1566,8 +1630,8 @@ func (o *Object) ID() string {
	return o.id
}

func newOptsCall(id string, method string, route string) (opts rest.Opts) {
	id, drive, rootURL := parseDirID(id)
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
	id, drive, rootURL := parseNormalizedID(normalizedID)

	if drive != "" {
		return rest.Opts{
@@ -1582,7 +1646,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
	}
}

func parseDirID(ID string) (string, string, string) {
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
	if strings.Index(ID, "#") >= 0 {
		s := strings.Split(ID, "#")
		return s[1], s[0], graphURL + "/drives"
@@ -1590,6 +1657,21 @@ func parseDirID(ID string) (string, string, string) {
	return ID, "", ""
}

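// Added example (illustrative; exampleParseNormalizedID is a hypothetical
// helper and the IDs are made up) showing both forms parseNormalizedID
// accepts:
func exampleParseNormalizedID() {
	itemID, driveID, rootURL := parseNormalizedID("b!abc#01XYZ")
	// itemID == "01XYZ", driveID == "b!abc", rootURL == graphURL+"/drives"
	itemID, driveID, rootURL = parseNormalizedID("01XYZ")
	// itemID == "01XYZ", driveID == "", rootURL == ""
	_, _, _ = itemID, driveID, rootURL
}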
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}

	baseSlash := base + "/"
	if strings.HasPrefix(target+"/", baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}

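// Added worked examples (illustrative; exampleGetRelativePathInsideBase is a
// hypothetical helper and fmt is assumed imported). The trailing-slash
// comparison above is what stops "a/bc" from counting as inside "a/b":
func exampleGetRelativePathInsideBase() {
	fmt.Println(getRelativePathInsideBase("a/b", "a/b/c")) // "c" true
	fmt.Println(getRelativePathInsideBase("a/b", "a/b"))   // "" true
	fmt.Println(getRelativePathInsideBase("a/b", "a/bc"))  // "" false
}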
// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)

@@ -6,6 +6,7 @@ import (
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
@@ -20,6 +21,7 @@ import (
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/readers"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
)
@@ -930,8 +932,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	// resp.Body.Close()
	// fs.Debugf(nil, "PostOpen: %#v", openResponse)

	// 1 MB chunks size
	// 10 MB chunks size
	chunkSize := int64(1024 * 1024 * 10)
	buf := make([]byte, int(chunkSize))
	chunkOffset := int64(0)
	remainingBytes := size
	chunkCounter := 0
@@ -944,14 +947,19 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
		remainingBytes -= currentChunkSize
		fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)

		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
		err = o.fs.pacer.Call(func() (bool, error) {
			// seek to the start in case this is a retry
			if _, err = chunk.Seek(0, io.SeekStart); err != nil {
				return false, err
			}
			var formBody bytes.Buffer
			w := multipart.NewWriter(&formBody)
			fw, err := w.CreateFormFile("file_data", o.remote)
			if err != nil {
				return false, err
			}
			if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
			if _, err = io.Copy(fw, chunk); err != nil {
				return false, err
			}
			// Add session_id
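			// Added sketch (illustrative, not part of the change) of the
			// retry-safe shape used above: the chunk is a buffer-backed
			// io.ReadSeeker limited to currentChunkSize bytes, so a failed
			// attempt can rewind and resend the same bytes instead of
			// consuming fresh data from `in`. sendChunk is a hypothetical
			// stand-in for the multipart POST:
			//
			//	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
			//	err = pacer.Call(func() (bool, error) {
			//		if _, err := chunk.Seek(0, io.SeekStart); err != nil {
			//			return false, err // cannot rewind, give up
			//		}
			//		return shouldRetry(sendChunk(chunk))
			//	})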
@@ -1082,7 +1090,7 @@ func (o *Object) readMetaData() (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
			Path:   "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
		}
		resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
		return o.fs.shouldRetry(resp, err)

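// Added example (illustrative; exampleQueryVsPathEscape is hypothetical and
// fmt and net/url are assumed imported) of why the change above switches to
// query escaping: the leaf lands in the ?name= query component, where '&'
// and '+' must be encoded differently than in a path segment:
func exampleQueryVsPathEscape() {
	fmt.Println(url.QueryEscape("a&b +c")) // "a%26b+%2Bc"
	fmt.Println(url.PathEscape("a&b +c"))  // "a&b%20+c"
}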
@@ -246,7 +246,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure Pcloud: %v", err)
		return nil, errors.Wrap(err, "failed to configure Pcloud")
	}

	f := &Fs{

@@ -69,17 +69,54 @@ func init() {
		}},
	}, {
		Name:     "connection_retries",
		Help:     "Number of connnection retries.",
		Help:     "Number of connection retries.",
		Default:  3,
		Advanced: true,
	}, {
		Name: "upload_cutoff",
		Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
		Default:  defaultUploadCutoff,
		Advanced: true,
	}, {
		Name: "chunk_size",
		Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
		Default:  minChunkSize,
		Advanced: true,
	}, {
		Name: "upload_concurrency",
		Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
		Default:  4,
		Advanced: true,
	}},
})
}

// Constants
const (
	listLimitSize  = 1000                   // Number of items to read at once
	maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	listLimitSize       = 1000                   // Number of items to read at once
	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Globals
@@ -92,12 +129,15 @@ func timestampToTime(tp int64) time.Time {

// Options defines the configuration for this backend
type Options struct {
	EnvAuth           bool   `config:"env_auth"`
	AccessKeyID       string `config:"access_key_id"`
	SecretAccessKey   string `config:"secret_access_key"`
	Endpoint          string `config:"endpoint"`
	Zone              string `config:"zone"`
	ConnectionRetries int    `config:"connection_retries"`
	EnvAuth           bool          `config:"env_auth"`
	AccessKeyID       string        `config:"access_key_id"`
	SecretAccessKey   string        `config:"secret_access_key"`
	Endpoint          string        `config:"endpoint"`
	Zone              string        `config:"zone"`
	ConnectionRetries int           `config:"connection_retries"`
	UploadCutoff      fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix `config:"chunk_size"`
	UploadConcurrency int           `config:"upload_concurrency"`
}

// Fs represents a remote qingstor server
@@ -227,6 +267,36 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
	return qs.Init(cf)
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -235,6 +305,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "qingstor: chunk size")
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "qingstor: upload cutoff")
	}
	bucket, key, err := qsParsePath(root)
	if err != nil {
		return nil, err
@@ -913,16 +991,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	mimeType := fs.MimeType(src)

	req := uploadInput{
		body:     in,
		qsSvc:    o.fs.svc,
		bucket:   o.fs.bucket,
		zone:     o.fs.zone,
		key:      key,
		mimeType: mimeType,
		body:        in,
		qsSvc:       o.fs.svc,
		bucket:      o.fs.bucket,
		zone:        o.fs.zone,
		key:         key,
		mimeType:    mimeType,
		partSize:    int64(o.fs.opt.ChunkSize),
		concurrency: o.fs.opt.UploadConcurrency,
	}
	uploader := newUploader(&req)

	err = uploader.upload()
	size := src.Size()
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	if multipart {
		err = uploader.upload()
	} else {
		err = uploader.singlePartUpload(in, size)
	}
	if err != nil {
		return err
	}

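	// Added worked example (illustrative, not part of the change): with the
	// default upload_cutoff of 200M, a 150 MiB file takes the single-part
	// path, a 4 GiB file goes through the chunked uploader, and a stream of
	// unknown length (size == -1) is always treated as multipart.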
@@ -2,12 +2,12 @@

// +build !plan9

package qingstor_test
package qingstor

import (
	"testing"

	"github.com/ncw/rclone/backend/qingstor"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

@@ -15,6 +15,19 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestQingStor:",
		NilObject:  (*qingstor.Object)(nil),
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -152,11 +152,11 @@ func (u *uploader) init() {
}

// singlePartUpload uploads a single object whose contentLength is less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
	bucketInit, _ := u.bucketInit()

	req := qs.PutObjectInput{
		ContentLength: &u.readerPos,
		ContentLength: &size,
		ContentType:   &u.cfg.mimeType,
		Body:          buf,
	}
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
	// Do one read to determine if we have more than one part
	reader, _, err := u.nextReader()
	if err == io.EOF { // single part
		fs.Debugf(u, "Tried to upload a singile object to QingStor")
		return u.singlePartUpload(reader)
		fs.Debugf(u, "Uploading as single part object to QingStor")
		return u.singlePartUpload(reader, u.readerPos)
	} else if err != nil {
		return errors.Errorf("read upload data failed: %s", err)
	}

	fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
	fs.Debugf(u, "Uploading as multi-part object to QingStor")
	mu := multiUploader{uploader: u}
	return mu.multiPartUpload(reader)
}
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
	req := qs.InitiateMultipartUploadInput{
		ContentType: &mu.cfg.mimeType,
	}
	fs.Debugf(mu, "Tried to initiate a multi-part upload")
	fs.Debugf(mu, "Initiating a multi-part upload")
	rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
	if err == nil {
		mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
		ContentLength: &c.size,
		Body:          c.buffer,
	}
	fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
	fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
	_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
	if err != nil {
		return err
	}
	fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
	fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)

	mu.mtx.Lock()
	defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
	req := qs.ListMultipartInput{
		UploadID: mu.uploadID,
	}
	fs.Debugf(mu, "Tried to list a multi-part")
	fs.Debugf(mu, "Reading multi-part details")
	rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
	if err == nil {
		mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
		ObjectParts: mu.objectParts,
		ETag:        &md5String,
	}
	fs.Debugf(mu, "Tried to complete a multi-part")
	fs.Debugf(mu, "Completing multi-part object")
	_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
	if err == nil {
		fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
	req := qs.AbortMultipartUploadInput{
		UploadID: uploadID,
	}
	fs.Debugf(mu, "Tried to abort a multi-part")
	fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
	_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}

@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
		var nextChunkLen int
		reader, nextChunkLen, err = mu.nextReader()
		if err != nil && err != io.EOF {
			// empty ch
			go func() {
				for range ch {
				}
			}()
			// Wait for all goroutines to finish
			close(ch)
			mu.wg.Wait()
			return err
		}
		if nextChunkLen == 0 && partNumber > 0 {

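	// Added design note (illustrative, not part of the change): on error the
	// extra goroutine keeps receiving from ch so that any chunks still queued
	// are discarded rather than uploaded; close(ch) then terminates both the
	// drainer and the upload workers, letting mu.wg.Wait() return without
	// leaking goroutines.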
backend/s3/s3.go
@@ -53,7 +53,7 @@ import (
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "s3",
		Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
@@ -61,6 +61,9 @@ func init() {
			Examples: []fs.OptionExample{{
				Value: "AWS",
				Help:  "Amazon Web Services (AWS) S3",
			}, {
				Value: "Alibaba",
				Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
			}, {
				Value: "Ceph",
				Help:  "Ceph Object Storage",
@@ -76,6 +79,9 @@ func init() {
			}, {
				Value: "Minio",
				Help:  "Minio Object Storage",
			}, {
				Value: "Netease",
				Help:  "Netease Object Storage (NOS)",
			}, {
				Value: "Wasabi",
				Help:  "Wasabi Object Storage",
@@ -150,7 +156,7 @@ func init() {
		}, {
			Name:     "region",
			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
			Provider: "!AWS",
			Provider: "!AWS,Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -269,10 +275,73 @@ func init() {
				Value: "s3.tor01.objectstorage.service.networklayer.com",
				Help:  "Toronto Single Site Private Endpoint",
			}},
		}, {
			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
			Name:     "endpoint",
			Help:     "Endpoint for OSS API.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "oss-cn-hangzhou.aliyuncs.com",
				Help:  "East China 1 (Hangzhou)",
			}, {
				Value: "oss-cn-shanghai.aliyuncs.com",
				Help:  "East China 2 (Shanghai)",
			}, {
				Value: "oss-cn-qingdao.aliyuncs.com",
				Help:  "North China 1 (Qingdao)",
			}, {
				Value: "oss-cn-beijing.aliyuncs.com",
				Help:  "North China 2 (Beijing)",
			}, {
				Value: "oss-cn-zhangjiakou.aliyuncs.com",
				Help:  "North China 3 (Zhangjiakou)",
			}, {
				Value: "oss-cn-huhehaote.aliyuncs.com",
				Help:  "North China 5 (Huhehaote)",
			}, {
				Value: "oss-cn-shenzhen.aliyuncs.com",
				Help:  "South China 1 (Shenzhen)",
			}, {
				Value: "oss-cn-hongkong.aliyuncs.com",
				Help:  "Hong Kong (Hong Kong)",
			}, {
				Value: "oss-us-west-1.aliyuncs.com",
				Help:  "US West 1 (Silicon Valley)",
			}, {
				Value: "oss-us-east-1.aliyuncs.com",
				Help:  "US East 1 (Virginia)",
			}, {
				Value: "oss-ap-southeast-1.aliyuncs.com",
				Help:  "Southeast Asia Southeast 1 (Singapore)",
			}, {
				Value: "oss-ap-southeast-2.aliyuncs.com",
				Help:  "Asia Pacific Southeast 2 (Sydney)",
			}, {
				Value: "oss-ap-southeast-3.aliyuncs.com",
				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
			}, {
				Value: "oss-ap-southeast-5.aliyuncs.com",
				Help:  "Asia Pacific Southeast 5 (Jakarta)",
			}, {
				Value: "oss-ap-northeast-1.aliyuncs.com",
				Help:  "Asia Pacific Northeast 1 (Japan)",
			}, {
				Value: "oss-ap-south-1.aliyuncs.com",
				Help:  "Asia Pacific South 1 (Mumbai)",
			}, {
				Value: "oss-eu-central-1.aliyuncs.com",
				Help:  "Central Europe 1 (Frankfurt)",
			}, {
				Value: "oss-eu-west-1.aliyuncs.com",
				Help:  "West Europe (London)",
			}, {
				Value: "oss-me-east-1.aliyuncs.com",
				Help:  "Middle East 1 (Dubai)",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
			Provider: "!AWS,IBMCOS",
			Provider: "!AWS,IBMCOS,Alibaba",
			Examples: []fs.OptionExample{{
				Value: "objects-us-west-1.dream.io",
				Help:  "Dream Objects endpoint",
@@ -291,7 +360,11 @@ func init() {
				Provider: "DigitalOcean",
			}, {
				Value:    "s3.wasabisys.com",
				Help:     "Wasabi Object Storage",
				Help:     "Wasabi US East endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.us-west-1.wasabisys.com",
				Help:     "Wasabi US West endpoint",
				Provider: "Wasabi",
			}},
		}, {
@@ -445,10 +518,17 @@ func init() {
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
			Provider: "!AWS,IBMCOS",
			Provider: "!AWS,IBMCOS,Alibaba",
		}, {
			Name: "acl",
			Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
			Help: `Canned ACL used when creating buckets and storing or copying objects.

This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
@@ -490,6 +570,28 @@ func init() {
				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
				Provider: "IBMCOS",
			}},
		}, {
			Name: "bucket_acl",
			Help: `Canned ACL used when creating buckets.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
			}, {
				Value: "public-read",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
			}, {
				Value: "public-read-write",
				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
			}, {
				Value: "authenticated-read",
				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
			}},
		}, {
			Name: "server_side_encryption",
			Help: "The server-side encryption algorithm used when storing this object in S3.",
@@ -534,13 +636,42 @@ func init() {
			}, {
				Value: "ONEZONE_IA",
				Help:  "One Zone Infrequent Access storage class",
			}, {
				Value: "GLACIER",
				Help:  "Glacier storage class",
			}},
		}, {
			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
			Name:     "storage_class",
			Help:     "The storage class to use when storing new objects in OSS.",
			Provider: "Alibaba",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "GLACIER",
				Help:  "Archive storage mode.",
			}, {
				Value: "STANDARD_IA",
				Help:  "Infrequent access storage mode.",
			}},
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this
size. The default is 5MB. The minimum is 5MB.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
@@ -568,7 +699,7 @@ concurrently.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  2,
			Default:  4,
			Advanced: true,
		}, {
			Name: "force_path_style",
@@ -598,14 +729,16 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,

// Constants
const (
	metaMtime      = "Mtime"                            // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash    = "Md5chksum"                        // the meta key to store md5hash in
	listChunkSize  = 1000                               // number of items to read at once
	maxRetries     = 10                                 // number of retries to make of operations
	maxSizeForCopy = 5 * 1024 * 1024 * 1024             // The maximum size of object we can COPY
	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024      // largest possible upload file size
	minChunkSize   = fs.SizeSuffix(s3manager.MinUploadPartSize)
	minSleep       = 10 * time.Millisecond // In case of error, start at 10ms sleep.
	metaMtime           = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash         = "Md5chksum"                   // the meta key to store md5hash in
	listChunkSize       = 1000                          // number of items to read at once
	maxRetries          = 10                            // number of retries to make of operations
	maxSizeForCopy      = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
	maxFileSize         = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
	minChunkSize        = fs.SizeSuffix(s3manager.MinUploadPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)

// Options defines the configuration for this backend
@@ -618,9 +751,11 @@ type Options struct {
	Endpoint             string        `config:"endpoint"`
	LocationConstraint   string        `config:"location_constraint"`
	ACL                  string        `config:"acl"`
	BucketACL            string        `config:"bucket_acl"`
	ServerSideEncryption string        `config:"server_side_encryption"`
	SSEKMSKeyID          string        `config:"sse_kms_key_id"`
	StorageClass         string        `config:"storage_class"`
	UploadCutoff         fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize            fs.SizeSuffix `config:"chunk_size"`
	DisableChecksum      bool          `config:"disable_checksum"`
	SessionToken         string        `config:"session_token"`
@@ -642,6 +777,7 @@ type Fs struct {
	bucketOK      bool         // true if we have created the bucket
	bucketDeleted bool         // true if we have deleted the bucket
	pacer         *pacer.Pacer // To pace the API calls
	srv           *http.Client // a plain http client
}

// Object describes an s3 object
@@ -690,7 +826,7 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	409, // Conflict - various states that could be resolved on a retry
	// 409, // Conflict - various states that could be resolved on a retry
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}

@@ -698,14 +834,13 @@ var retryErrorCodes = []int{
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func shouldRetry(err error) (bool, error) {

	// If this is an awserr object, try and extract more useful information to determine if we should retry
	if awsError, ok := err.(awserr.Error); ok {
		// Simple case, check the original embedded error in case it's generically retriable
		if fserrors.ShouldRetry(awsError.OrigErr()) {
			return true, err
		}
		//Failing that, if it's a RequestFailure it's probably got an http status code we can check
		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			for _, e := range retryErrorCodes {
				if reqErr.StatusCode() == e {
@@ -714,7 +849,7 @@ func shouldRetry(err error) (bool, error) {
				}
			}
		}
	}
	//Ok, not an awserr, check for generic failure conditions
	// Ok, not an awserr, check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}

@@ -791,6 +926,9 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
	if opt.Region == "" {
		opt.Region = "us-east-1"
	}
	if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
		opt.ForcePathStyle = false
	}
	awsConfig := aws.NewConfig().
		WithRegion(opt.Region).
		WithMaxRetries(maxRetries).
@@ -799,8 +937,21 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
		WithHTTPClient(fshttp.NewClient(fs.Config)).
		WithS3ForcePathStyle(opt.ForcePathStyle)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	awsSessionOpts := session.Options{
		Config: *awsConfig,
	}
	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
		awsSessionOpts.Config.Credentials = nil
	}
	ses, err := session.NewSessionWithOptions(awsSessionOpts)
	if err != nil {
		return nil, nil, err
	}
	c := s3.New(ses)
	if opt.V2Auth || opt.Region == "other-v2-signature" {
		fs.Debugf(nil, "Using v2 auth")
		signer := func(req *request.Request) {
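// Added usage note for the shared-config behaviour above (illustrative, not
// part of the diff): with env_auth = true and no access keys configured, the
// profile selected by the standard AWS_PROFILE environment variable now
// supplies the credentials, e.g.:
//
//	AWS_PROFILE=work rclone lsd s3:my-bucket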
@@ -832,6 +983,21 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -844,10 +1010,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, errors.Wrap(err, "s3: chunk size")
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "s3: upload cutoff")
	}
	bucket, directory, err := s3ParsePath(root)
	if err != nil {
		return nil, err
	}
	if opt.ACL == "" {
		opt.ACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = opt.ACL
	}
	c, ses, err := s3Connection(opt)
	if err != nil {
		return nil, err
@@ -860,6 +1036,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		bucket: bucket,
		ses:    ses,
		pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
		srv:    fshttp.NewClient(fs.Config),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
@@ -1205,7 +1382,7 @@ func (f *Fs) Mkdir(dir string) error {
	}
	req := s3.CreateBucketInput{
		Bucket: &f.bucket,
		ACL:    &f.opt.ACL,
		ACL:    &f.opt.BucketACL,
	}
	if f.opt.LocationConstraint != "" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -1224,6 +1401,7 @@ func (f *Fs) Mkdir(dir string) error {
	if err == nil {
		f.bucketOK = true
		f.bucketDeleted = false
		fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
	}
	return err
}
@@ -1247,6 +1425,7 @@ func (f *Fs) Rmdir(dir string) error {
	if err == nil {
		f.bucketOK = false
		f.bucketDeleted = true
		fs.Infof(f, "Bucket deleted")
	}
	return err
}
@@ -1286,6 +1465,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
	req := s3.CopyObjectInput{
		Bucket:            &f.bucket,
		ACL:               &f.opt.ACL,
		Key:               &key,
		CopySource:        &source,
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
@@ -1533,38 +1713,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	modTime := src.ModTime()
	size := src.Size()

	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = o.fs.opt.UploadConcurrency
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
		u.PartSize = int64(o.fs.opt.ChunkSize)
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	var uploader *s3manager.Uploader
	if multipart {
		uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
			u.Concurrency = o.fs.opt.UploadConcurrency
			u.LeavePartsOnError = false
			u.S3 = o.fs.c
			u.PartSize = int64(o.fs.opt.ChunkSize)

		if size == -1 {
			// Make parts as small as possible while still being able to upload to the
			// S3 file size limit. Rounded up to nearest MB.
			u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
			return
		}
		// Adjust PartSize until the number of parts is small enough.
		if size/u.PartSize >= s3manager.MaxUploadParts {
			// Calculate partition size rounded up to the nearest MB
			u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
		}
	})
			if size == -1 {
				// Make parts as small as possible while still being able to upload to the
				// S3 file size limit. Rounded up to nearest MB.
				u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
				return
			}
			// Adjust PartSize until the number of parts is small enough.
			if size/u.PartSize >= s3manager.MaxUploadParts {
				// Calculate partition size rounded up to the nearest MB
				u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
			}
		})
	}

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
	// read the md5sum if available for non multipart and if
	// disable checksum isn't present.
	var md5sum string
	if !multipart || !o.fs.opt.DisableChecksum {
		hash, err := src.Hash(hash.MD5)

		if err == nil && matchMd5.MatchString(hash) {
			hashBytes, err := hex.DecodeString(hash)

			if err == nil {
				metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
				if multipart {
					metadata[metaMD5Hash] = &md5sum
				}
			}
		}
	}
@@ -1573,30 +1761,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	mimeType := fs.MimeType(src)

	key := o.fs.root + o.remote
	req := s3manager.UploadInput{
		Bucket:      &o.fs.bucket,
		ACL:         &o.fs.opt.ACL,
		Key:         &key,
		Body:        in,
		ContentType: &mimeType,
		Metadata:    metadata,
		//ContentLength: &size,
	}
	if o.fs.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
	}
	if o.fs.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
	}
	if o.fs.opt.StorageClass != "" {
		req.StorageClass = &o.fs.opt.StorageClass
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		_, err = uploader.Upload(&req)
		return shouldRetry(err)
	})
	if err != nil {
		return err
	if multipart {
		req := s3manager.UploadInput{
			Bucket:      &o.fs.bucket,
			ACL:         &o.fs.opt.ACL,
			Key:         &key,
			Body:        in,
			ContentType: &mimeType,
			Metadata:    metadata,
			//ContentLength: &size,
		}
		if o.fs.opt.ServerSideEncryption != "" {
			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
		}
		if o.fs.opt.SSEKMSKeyID != "" {
			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
		}
		if o.fs.opt.StorageClass != "" {
			req.StorageClass = &o.fs.opt.StorageClass
		}
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			_, err = uploader.Upload(&req)
			return shouldRetry(err)
		})
		if err != nil {
			return err
		}
	} else {
		req := s3.PutObjectInput{
			Bucket:      &o.fs.bucket,
			ACL:         &o.fs.opt.ACL,
			Key:         &key,
			ContentType: &mimeType,
			Metadata:    metadata,
		}
		if md5sum != "" {
			req.ContentMD5 = &md5sum
		}
		if o.fs.opt.ServerSideEncryption != "" {
			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
		}
		if o.fs.opt.SSEKMSKeyID != "" {
			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
		}
		if o.fs.opt.StorageClass != "" {
			req.StorageClass = &o.fs.opt.StorageClass
		}

		// Create the request
		putObj, _ := o.fs.c.PutObjectRequest(&req)

		// Sign it so we can upload using a presigned request.
		//
		// Note the SDK doesn't currently support streaming to
		// PutObject so we'll use this work-around.
		url, headers, err := putObj.PresignRequest(15 * time.Minute)
		if err != nil {
			return errors.Wrap(err, "s3 upload: sign request")
		}

		// Set the body to nil if empty so we don't send chunked encoding
		if size == 0 {
			in = nil
		}

		// create the vanilla http request
		httpReq, err := http.NewRequest("PUT", url, in)
		if err != nil {
			return errors.Wrap(err, "s3 upload: new request")
		}

		// set the headers we signed and the length
		httpReq.Header = headers
		httpReq.ContentLength = size

		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err := o.fs.srv.Do(httpReq)
			if err != nil {
				return shouldRetry(err)
			}
			body, err := rest.ReadBody(resp)
			if err != nil {
				return shouldRetry(err)
			}
			if resp.StatusCode >= 200 && resp.StatusCode < 299 {
				return false, nil
			}
			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
		})
		if err != nil {
			return err
		}
	}

	// Read the metadata from the newly created object

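// Added standalone sketch (illustrative, not part of the diff) of the
// presigned single-part upload used above. presignedPut is a hypothetical
// helper; io, fmt, net/http, time and the aws-sdk-go s3 package are assumed
// to be imported:
func presignedPut(svc *s3.S3, bucket, key string, body io.Reader, size int64) error {
	putObj, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket: &bucket,
		Key:    &key,
	})
	// Sign a URL instead of streaming through the SDK, which would need a Seeker
	url, headers, err := putObj.PresignRequest(15 * time.Minute)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("PUT", url, body)
	if err != nil {
		return err
	}
	req.Header = headers
	req.ContentLength = size
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("s3 upload: %s", resp.Status)
	}
	return nil
}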
@@ -23,4 +23,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -28,7 +28,7 @@ import (
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"github.com/pkg/sftp"
	"github.com/xanzy/ssh-agent"
	sshagent "github.com/xanzy/ssh-agent"
	"golang.org/x/crypto/ssh"
	"golang.org/x/time/rate"
)
@@ -66,7 +66,22 @@ func init() {
			IsPassword: true,
		}, {
			Name: "key_file",
			Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
			Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
		}, {
			Name: "key_file_pass",
			Help: `The passphrase to decrypt the PEM-encoded private key file.

Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
			IsPassword: true,
		}, {
			Name: "key_use_agent",
			Help: `When set forces the usage of the ssh-agent.

When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
requested from the ssh-agent. This avoids ` + "`Too many authentication failures for *username*`" + ` errors
when the ssh-agent contains many keys.`,
			Default: false,
		}, {
			Name: "use_insecure_cipher",
			Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
@@ -122,6 +137,8 @@ type Options struct {
	Port              string `config:"port"`
	Pass              string `config:"pass"`
	KeyFile           string `config:"key_file"`
	KeyFilePass       string `config:"key_file_pass"`
	KeyUseAgent       bool   `config:"key_use_agent"`
	UseInsecureCipher bool   `config:"use_insecure_cipher"`
	DisableHashCheck  bool   `config:"disable_hashcheck"`
	AskPassword       bool   `config:"ask_password"`
@@ -298,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
	f.poolMu.Unlock()
}

// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

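// Added examples for shellExpand (illustrative; results depend on the
// environment, here assuming HOME=/home/user and KEYDIR=/keys):
//
//	shellExpand("~/.ssh/id_rsa")  // "/home/user/.ssh/id_rsa"
//	shellExpand("$KEYDIR/id_rsa") // "/keys/id_rsa"
//	shellExpand("")               // ""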
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -325,8 +354,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
	}

	keyFile := shellExpand(opt.KeyFile)
	// Add ssh agent-auth if no password or file specified
	if opt.Pass == "" && opt.KeyFile == "" {
	if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
		sshAgentClient, _, err := sshagent.New()
		if err != nil {
			return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -335,16 +365,46 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		if err != nil {
			return nil, errors.Wrap(err, "couldn't read ssh agent signers")
		}
		sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
		if keyFile != "" {
			pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
			if err != nil {
				return nil, errors.Wrap(err, "failed to read public key file")
			}
			pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse public key file")
			}
			pubM := pub.Marshal()
			found := false
			for _, s := range signers {
				if bytes.Equal(pubM, s.PublicKey().Marshal()) {
					sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
					found = true
					break
				}
			}
			if !found {
				return nil, errors.New("private key not found in the ssh-agent")
			}
		} else {
			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
		}
	}

	// Load key file if specified
	if opt.KeyFile != "" {
		key, err := ioutil.ReadFile(opt.KeyFile)
	if keyFile != "" {
		key, err := ioutil.ReadFile(keyFile)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read private key file")
		}
		signer, err := ssh.ParsePrivateKey(key)
		clearpass := ""
		if opt.KeyFilePass != "" {
			clearpass, err = obscure.Reveal(opt.KeyFilePass)
			if err != nil {
				return nil, err
			}
		}
		signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse private key file")
		}
@@ -594,12 +654,22 @@ func (f *Fs) Mkdir(dir string) error {

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
	// Check to see if directory is empty as some servers will
	// delete recursively with RemoveDirectory
	entries, err := f.List(dir)
	if err != nil {
		return errors.Wrap(err, "Rmdir")
	}
	if len(entries) != 0 {
		return fs.ErrorDirectoryNotEmpty
	}
	// Remove the directory
	root := path.Join(f.root, dir)
	c, err := f.getSftpConnection()
	if err != nil {
		return errors.Wrap(err, "Rmdir")
	}
	err = c.sftpClient.Remove(root)
	err = c.sftpClient.RemoveDirectory(root)
	f.putSftpConnection(&c, err)
	return err
}
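// Added design note (illustrative, not part of the change): some SFTP
// servers implement RemoveDirectory as a recursive delete, so the explicit
// List + emptiness check above preserves rclone's contract that Rmdir only
// removes empty directories.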
@@ -769,6 +839,10 @@ func (o *Object) Hash(r hash.Type) (string, error) {
		return "", hash.ErrUnsupported
	}

	if o.fs.opt.DisableHashCheck {
		return "", nil
	}

	c, err := o.fs.getSftpConnection()
	if err != nil {
		return "", errors.Wrap(err, "Hash get SFTP connection")

@@ -21,6 +21,7 @@ import (
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/operations"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/swift"
	"github.com/pkg/errors"
)
@@ -30,6 +31,7 @@ const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	listChunks                 = 1000                     // chunk size to read directory listings
	defaultChunkSize           = 5 * fs.GibiByte
	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)

// SharedOptions are shared between swift and hubic
@@ -41,6 +43,20 @@ Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
	Default:  defaultChunkSize,
	Advanced: true,
}, {
	Name: "no_chunk",
	Help: `Don't chunk files during streaming upload.

When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.

This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.

Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
	Default:  false,
	Advanced: true,
}}

// Register with Fs
@@ -173,6 +189,7 @@ type Options struct {
	StoragePolicy string        `config:"storage_policy"`
	EndpointType  string        `config:"endpoint_type"`
	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
	NoChunk       bool          `config:"no_chunk"`
}

// Fs represents a remote swift server
@@ -187,16 +204,20 @@ type Fs struct {
	containerOK       bool         // true if we have created the container
	segmentsContainer string       // container to store the segments (if any) in
	noCheckContainer  bool         // don't check the container before creating it
	pacer             *pacer.Pacer // To pace the API calls
}

// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs      *Fs           // what this object is part of
	remote  string        // The remote path
	info    swift.Object  // Info from the swift object if known
	headers swift.Headers // The object headers if known
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	size         int64
	lastModified time.Time
	contentType  string
	md5          string
	headers      swift.Headers // The object headers if known
}

// ------------------------------------------------------------
@@ -227,6 +248,32 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
401, // Unauthorized (eg "Token has expired")
|
||||
408, // Request Timeout
|
||||
409, // Conflict - various states that could be resolved on a retry
|
||||
429, // Rate exceeded.
|
||||
500, // Get occasional 500 Internal Server Error
|
||||
503, // Service Unavailable/Slow Down - "Reduce your request rate"
|
||||
504, // Gateway Time-out
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
// If this is an swift.Error object extract the HTTP error code
|
||||
if swiftError, ok := err.(*swift.Error); ok {
|
||||
for _, e := range retryErrorCodes {
|
||||
if swiftError.StatusCode == e {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
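shouldRetry classifies swift errors by HTTP status so the pacer can retry transient failures with backoff. A standalone sketch of the same classify-and-retry loop; the pacer here is simplified to a fixed retry count with doubling sleeps, and the status list mirrors retryErrorCodes:

package sketch

import (
    "errors"
    "fmt"
    "time"
)

type httpError struct{ StatusCode int }

func (e *httpError) Error() string { return fmt.Sprintf("http %d", e.StatusCode) }

var retryStatuses = map[int]bool{401: true, 408: true, 409: true, 429: true, 500: true, 503: true, 504: true}

// shouldRetry reports whether err looks transient.
func shouldRetry(err error) bool {
    var he *httpError
    return errors.As(err, &he) && retryStatuses[he.StatusCode]
}

// call retries fn with a growing sleep while shouldRetry says so.
func call(fn func() error, tries int) error {
    sleep := 10 * time.Millisecond
    var err error
    for i := 0; i < tries; i++ {
        if err = fn(); err == nil || !shouldRetry(err) {
            return err
        }
        time.Sleep(sleep)
        sleep *= 2 // exponential backoff, as a pacer would do
    }
    return err
}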
// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

@@ -337,6 +384,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
         segmentsContainer: container + "_segments",
         root:              directory,
         noCheckContainer:  noCheckContainer,
+        pacer:             pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
     }
     f.features = (&fs.Features{
         ReadMimeType: true,
@@ -346,7 +394,11 @@
     if f.root != "" {
         f.root += "/"
         // Check to see if the object exists - ignoring directory markers
-        info, _, err := f.c.Object(container, directory)
+        var info swift.Object
+        err = f.pacer.Call(func() (bool, error) {
+            info, _, err = f.c.Object(container, directory)
+            return shouldRetry(err)
+        })
         if err == nil && info.ContentType != directoryMarkerContentType {
             f.root = path.Dir(directory)
             if f.root == "." {
@@ -398,7 +450,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
     }
     if info != nil {
         // Set info but not headers
-        o.info = *info
+        err := o.decodeMetaData(info)
+        if err != nil {
+            return nil, err
+        }
     } else {
         err := o.readMetaData() // reads info and headers, returning an error
         if err != nil {
@@ -436,7 +491,12 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
     }
     rootLength := len(root)
     return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
-        objects, err := f.c.Objects(container, opts)
+        var objects []swift.Object
+        var err error
+        err = f.pacer.Call(func() (bool, error) {
+            objects, err = f.c.Objects(container, opts)
+            return shouldRetry(err)
+        })
         if err == nil {
             for i := range objects {
                 object := &objects[i]
@@ -525,7 +585,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
     if dir != "" {
         return nil, fs.ErrorListBucketRequired
     }
-    containers, err := f.c.ContainersAll(nil)
+    var containers []swift.Container
+    err = f.pacer.Call(func() (bool, error) {
+        containers, err = f.c.ContainersAll(nil)
+        return shouldRetry(err)
+    })
     if err != nil {
         return nil, errors.Wrap(err, "container listing failed")
     }
@@ -586,7 +650,12 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {

 // About gets quota information
 func (f *Fs) About() (*fs.Usage, error) {
-    containers, err := f.c.ContainersAll(nil)
+    var containers []swift.Container
+    var err error
+    err = f.pacer.Call(func() (bool, error) {
+        containers, err = f.c.ContainersAll(nil)
+        return shouldRetry(err)
+    })
     if err != nil {
         return nil, errors.Wrap(err, "container listing failed")
     }
@@ -636,14 +705,20 @@ func (f *Fs) Mkdir(dir string) error {
     // Check to see if container exists first
     var err error = swift.ContainerNotFound
     if !f.noCheckContainer {
-        _, _, err = f.c.Container(f.container)
+        err = f.pacer.Call(func() (bool, error) {
+            _, _, err = f.c.Container(f.container)
+            return shouldRetry(err)
+        })
     }
     if err == swift.ContainerNotFound {
         headers := swift.Headers{}
         if f.opt.StoragePolicy != "" {
             headers["X-Storage-Policy"] = f.opt.StoragePolicy
         }
-        err = f.c.ContainerCreate(f.container, headers)
+        err = f.pacer.Call(func() (bool, error) {
+            err = f.c.ContainerCreate(f.container, headers)
+            return shouldRetry(err)
+        })
     }
     if err == nil {
         f.containerOK = true
@@ -660,7 +735,11 @@ func (f *Fs) Rmdir(dir string) error {
     if f.root != "" || dir != "" {
         return nil
     }
-    err := f.c.ContainerDelete(f.container)
+    var err error
+    err = f.pacer.Call(func() (bool, error) {
+        err = f.c.ContainerDelete(f.container)
+        return shouldRetry(err)
+    })
     if err == nil {
         f.containerOK = false
     }
@@ -719,7 +798,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
         return nil, fs.ErrorCantCopy
     }
     srcFs := srcObj.fs
-    _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
+    err = f.pacer.Call(func() (bool, error) {
+        _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
+        return shouldRetry(err)
+    })
     if err != nil {
         return nil, err
     }
@@ -768,7 +850,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
         fs.Debugf(o, "Returning empty Md5sum for swift large object")
         return "", nil
     }
-    return strings.ToLower(o.info.Hash), nil
+    return strings.ToLower(o.md5), nil
 }

 // hasHeader checks for the header passed in returning false if the
@@ -797,7 +879,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-    return o.info.Bytes
+    return o.size
 }

+// decodeMetaData sets the metadata in the object from a swift.Object
+//
+// Sets
+//  o.lastModified
+//  o.size
+//  o.md5
+//  o.contentType
+func (o *Object) decodeMetaData(info *swift.Object) (err error) {
+    o.lastModified = info.LastModified
+    o.size = info.Bytes
+    o.md5 = info.Hash
+    o.contentType = info.ContentType
+    return nil
+}
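The Object now caches size, modtime, MD5 and content type as plain fields, decoded once from the swift listing entry, and readMetaData only hits the API when o.headers is still nil. A small sketch of that decode-once, fetch-lazily pattern, with a caller-supplied fetch standing in for the swift call:

package sketch

import "time"

// apiInfo stands in for swift.Object.
type apiInfo struct {
    Bytes        int64
    LastModified time.Time
    Hash         string
    ContentType  string
}

type object struct {
    size         int64
    lastModified time.Time
    md5          string
    contentType  string
    fetched      bool // plays the role of the o.headers != nil check
}

// decode flattens the listing entry into the object's own fields.
func (o *object) decode(info *apiInfo) {
    o.size = info.Bytes
    o.lastModified = info.LastModified
    o.md5 = info.Hash
    o.contentType = info.ContentType
}

// readMetaData fetches and decodes only on first use.
func (o *object) readMetaData(fetch func() (*apiInfo, error)) error {
    if o.fetched {
        return nil
    }
    info, err := fetch()
    if err != nil {
        return err
    }
    o.decode(info)
    o.fetched = true
    return nil
}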
// readMetaData gets the metadata if it hasn't already been fetched
@@ -809,15 +906,23 @@ func (o *Object) readMetaData() (err error) {
     if o.headers != nil {
         return nil
     }
-    info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
+    var info swift.Object
+    var h swift.Headers
+    err = o.fs.pacer.Call(func() (bool, error) {
+        info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
+        return shouldRetry(err)
+    })
     if err != nil {
         if err == swift.ObjectNotFound {
             return fs.ErrorObjectNotFound
         }
         return err
     }
-    o.info = info
     o.headers = h
+    err = o.decodeMetaData(&info)
+    if err != nil {
+        return err
+    }
     return nil
 }

@@ -828,17 +933,17 @@
 // LastModified returned in the http headers
 func (o *Object) ModTime() time.Time {
     if fs.Config.UseServerModTime {
-        return o.info.LastModified
+        return o.lastModified
     }
     err := o.readMetaData()
     if err != nil {
         fs.Debugf(o, "Failed to read metadata: %s", err)
-        return o.info.LastModified
+        return o.lastModified
     }
     modTime, err := o.headers.ObjectMetadata().GetModTime()
     if err != nil {
         // fs.Logf(o, "Failed to read mtime from object: %v", err)
-        return o.info.LastModified
+        return o.lastModified
     }
     return modTime
 }
@@ -861,7 +966,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
             newHeaders[k] = v
         }
     }
-    return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
+    return o.fs.pacer.Call(func() (bool, error) {
+        err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
+        return shouldRetry(err)
+    })
 }

 // Storable returns if this object is storable
@@ -869,14 +977,17 @@
 // It compares the Content-Type to directoryMarkerContentType - that
 // makes it a directory marker which is not storable.
 func (o *Object) Storable() bool {
-    return o.info.ContentType != directoryMarkerContentType
+    return o.contentType != directoryMarkerContentType
 }

 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
     headers := fs.OpenOptionHeaders(options)
     _, isRanging := headers["Range"]
-    in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
+    err = o.fs.pacer.Call(func() (bool, error) {
+        in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
+        return shouldRetry(err)
+    })
     return
 }

@@ -903,13 +1014,20 @@ func (o *Object) removeSegments(except string) error {
         }
         segmentPath := segmentsRoot + remote
         fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
-        return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
+        var err error
+        return o.fs.pacer.Call(func() (bool, error) {
+            err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
+            return shouldRetry(err)
+        })
     })
     if err != nil {
         return err
     }
     // remove the segments container if empty, ignore errors
-    err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
+    err = o.fs.pacer.Call(func() (bool, error) {
+        err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
+        return shouldRetry(err)
+    })
     if err == nil {
         fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
     }
@@ -938,13 +1056,19 @@ func urlEncode(str string) string {
 func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
     // Create the segmentsContainer if it doesn't exist
     var err error
-    _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
+    err = o.fs.pacer.Call(func() (bool, error) {
+        _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
+        return shouldRetry(err)
+    })
     if err == swift.ContainerNotFound {
         headers := swift.Headers{}
         if o.fs.opt.StoragePolicy != "" {
             headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
         }
-        err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
+        err = o.fs.pacer.Call(func() (bool, error) {
+            err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
+            return shouldRetry(err)
+        })
     }
     if err != nil {
         return "", err
@@ -973,7 +1097,10 @@
         segmentReader := io.LimitReader(in, n)
         segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
         fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
-        _, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+            _, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+            return shouldRetry(err)
+        })
        if err != nil {
             return "", err
         }
@@ -984,7 +1111,10 @@
     headers["Content-Length"] = "0" // set Content-Length as we know it
     emptyReader := bytes.NewReader(nil)
     manifestName := o.fs.root + o.remote
-    _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
+    err = o.fs.pacer.Call(func() (bool, error) {
+        _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
+        return shouldRetry(err)
+    })
     return uniquePrefix + "/", err
 }
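updateChunks writes a dynamic large object: each chunk goes into the _segments container under a zero-padded index, and a final zero-byte manifest points at that prefix. A sketch of the naming and manifest step, with hypothetical put/putManifest callbacks in place of the real swift client (the header name X-Object-Manifest is the standard swift DLO mechanism; the container name is illustrative):

package sketch

import (
    "fmt"
    "io"
)

// countingReader records how many bytes were read from r.
type countingReader struct {
    r io.Reader
    n int64
}

func (c *countingReader) Read(p []byte) (int, error) {
    n, err := c.r.Read(p)
    c.n += int64(n)
    return n, err
}

// uploadSegmented splits r into chunkSize pieces named
// <prefix>/00000000, <prefix>/00000001, ... then writes an empty
// manifest object whose header points at the segment prefix.
// It assumes put fully drains the reader it is given.
func uploadSegmented(r io.Reader, chunkSize int64, prefix string,
    put func(name string, data io.Reader) error,
    putManifest func(name string, headers map[string]string) error) error {
    for i := 0; ; i++ {
        seg := &countingReader{r: io.LimitReader(r, chunkSize)}
        name := fmt.Sprintf("%s/%08d", prefix, i)
        if err := put(name, seg); err != nil {
            return err
        }
        if seg.n < chunkSize { // short segment: source exhausted
            break
        }
    }
    return putManifest("manifest", map[string]string{
        "X-Object-Manifest": "segments/" + prefix, // container/prefix of the segments
    })
}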
@@ -1014,17 +1144,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
     contentType := fs.MimeType(src)
     headers := m.ObjectHeaders()
     uniquePrefix := ""
-    if size > int64(o.fs.opt.ChunkSize) || size == -1 {
+    if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
         uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
         if err != nil {
             return err
         }
+        o.headers = nil // wipe old metadata
     } else {
-        headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
-        _, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+        if size >= 0 {
+            headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
+        }
+        var rxHeaders swift.Headers
+        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+            rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+            return shouldRetry(err)
+        })
         if err != nil {
             return err
         }
+        // set Metadata since ObjectPut checked the hash and length so we know the
+        // object has been safely uploaded
+        o.lastModified = modTime
+        o.size = size
+        o.md5 = rxHeaders["ETag"]
+        o.contentType = contentType
+        o.headers = headers
     }

     // If file was a dynamic large object then remove old/all segments
@@ -1035,8 +1179,7 @@
         }
     }

-    // Read the metadata from the newly created object
-    o.headers = nil // wipe old metadata
+    // Read the metadata from the newly created object if necessary
     return o.readMetaData()
 }

@@ -1047,7 +1190,10 @@ func (o *Object) Remove() error {
         return err
     }
     // Remove file/manifest first
-    err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+    err = o.fs.pacer.Call(func() (bool, error) {
+        err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+        return shouldRetry(err)
+    })
     if err != nil {
         return err
     }
@@ -1063,7 +1209,7 @@

 // MimeType of an Object if known, "" otherwise
 func (o *Object) MimeType() string {
-    return o.info.ContentType
+    return o.contentType
 }

 // Check the interfaces are satisfied
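With the new no_chunk option, a streaming upload (size == -1) only takes the chunked path when chunking is allowed; fixed-size uploads still chunk once they exceed chunk_size. The decision reduces to the one-line predicate from the hunk above, sketched with a few worked cases:

package main

import "fmt"

// useChunked mirrors the condition in Update: chunk when the size is
// known and over the threshold, or when it is unknown (-1) and the
// no_chunk option has not been set.
func useChunked(size, chunkSize int64, noChunk bool) bool {
    return size > chunkSize || (size == -1 && !noChunk)
}

func main() {
    const gib = 1 << 30
    fmt.Println(useChunked(6*gib, 5*gib, false)) // true: over chunk_size
    fmt.Println(useChunked(-1, 5*gib, false))    // true: streaming, chunking allowed
    fmt.Println(useChunked(-1, 5*gib, true))     // false: no_chunk wins for streams
}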
@@ -6,7 +6,10 @@ import (
     "regexp"
     "strconv"
     "strings"
+    "sync"
     "time"
+
+    "github.com/ncw/rclone/fs"
 )

 const (
@@ -145,8 +148,11 @@ var timeFormats = []string{
     time.RFC1123Z,  // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
     time.UnixDate,  // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
     noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
+    time.RFC3339,   // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
 }

+var oneTimeError sync.Once
+
 // UnmarshalXML turns XML into a Time
 func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
     var v string
@@ -170,5 +176,14 @@
             break
         }
     }
+    if err != nil {
+        oneTimeError.Do(func() {
+            fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
+        })
+        // Return the epoch instead
+        *t = Time(time.Unix(0, 0))
+        // ignore error
+        err = nil
+    }
     return err
 }
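Rather than failing every listing on an unparseable PROPFIND date, the WebDAV time decoder now logs the problem once (via sync.Once) and substitutes the Unix epoch. A self-contained sketch of the same fallback, with a trimmed layout list and plain printing in place of the rclone logger:

package sketch

import (
    "fmt"
    "sync"
    "time"
)

var timeFormats = []string{time.RFC1123, time.RFC1123Z, time.RFC3339}

var onceWarn sync.Once

// parseTime tries each layout and falls back to the epoch, warning
// only the first time a value fails to parse.
func parseTime(v string) time.Time {
    for _, layout := range timeFormats {
        if t, err := time.Parse(layout, v); err == nil {
            return t
        }
    }
    onceWarn.Do(func() {
        fmt.Printf("Failed to parse time %q - using the epoch\n", v)
    })
    return time.Unix(0, 0)
}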
@@ -31,7 +31,6 @@ import (
     "github.com/ncw/rclone/backend/webdav/api"
     "github.com/ncw/rclone/backend/webdav/odrvcookie"
     "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/config"
     "github.com/ncw/rclone/fs/config/configmap"
     "github.com/ncw/rclone/fs/config/configstruct"
     "github.com/ncw/rclone/fs/config/obscure"
@@ -96,10 +95,11 @@ func init() {

 // Options defines the configuration for this backend
 type Options struct {
-    URL    string `config:"url"`
-    Vendor string `config:"vendor"`
-    User   string `config:"user"`
-    Pass   string `config:"pass"`
+    URL         string `config:"url"`
+    Vendor      string `config:"vendor"`
+    User        string `config:"user"`
+    Pass        string `config:"pass"`
+    BearerToken string `config:"bearer_token"`
 }

 // Fs represents a remote webdav
@@ -283,9 +283,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     rootIsDir := strings.HasSuffix(root, "/")
     root = strings.Trim(root, "/")

-    user := config.FileGet(name, "user")
-    pass := config.FileGet(name, "pass")
-    bearerToken := config.FileGet(name, "bearer_token")
     if !strings.HasSuffix(opt.URL, "/") {
         opt.URL += "/"
     }
@@ -320,10 +317,10 @@
     f.features = (&fs.Features{
         CanHaveEmptyDirectories: true,
     }).Fill(f)
-    if user != "" || pass != "" {
+    if opt.User != "" || opt.Pass != "" {
         f.srv.SetUserPass(opt.User, opt.Pass)
-    } else if bearerToken != "" {
-        f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
+    } else if opt.BearerToken != "" {
+        f.srv.SetHeader("Authorization", "BEARER "+opt.BearerToken)
     }
     f.srv.SetErrorHandler(errorHandler)
     err = f.setQuirks(opt.Vendor)
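Credential handling now reads everything through the parsed Options struct, and the bearer token is used only when no username or password is set. A sketch of the same precedence when building request headers, with a hypothetical setHeader callback in place of the rest client:

package sketch

import "encoding/base64"

type options struct {
    User, Pass, BearerToken string
}

// applyAuth prefers basic auth when a user or password is configured,
// otherwise falls back to the bearer token, matching the order above.
func applyAuth(opt options, setHeader func(k, v string)) {
    switch {
    case opt.User != "" || opt.Pass != "":
        cred := base64.StdEncoding.EncodeToString([]byte(opt.User + ":" + opt.Pass))
        setHeader("Authorization", "Basic "+cred)
    case opt.BearerToken != "":
        setHeader("Authorization", "BEARER "+opt.BearerToken)
    }
}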
@@ -604,10 +601,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
     return f.mkdir(parent)
 }

-// mkdir makes the directory and parents using native paths
-func (f *Fs) mkdir(dirPath string) error {
-    // defer log.Trace(dirPath, "")("")
-    // We assume the root is already ceated
+// low level mkdir, only makes the directory, doesn't attempt to create parents
+func (f *Fs) _mkdir(dirPath string) error {
+    // We assume the root is already created
     if dirPath == "" {
         return nil
     }
@@ -620,20 +616,26 @@
         Path:       dirPath,
         NoResponse: true,
     }
-    err := f.pacer.Call(func() (bool, error) {
+    return f.pacer.Call(func() (bool, error) {
         resp, err := f.srv.Call(&opts)
         return shouldRetry(resp, err)
     })
+}
+
+// mkdir makes the directory and parents using native paths
+func (f *Fs) mkdir(dirPath string) error {
+    // defer log.Trace(dirPath, "")("")
+    err := f._mkdir(dirPath)
     if apiErr, ok := err.(*api.Error); ok {
         // already exists
         if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
             return nil
         }
-        // parent does not exists
+        // parent does not exist
         if apiErr.StatusCode == http.StatusConflict {
             err = f.mkParentDir(dirPath)
             if err == nil {
-                err = f.mkdir(dirPath)
+                err = f._mkdir(dirPath)
             }
         }
     }
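Splitting mkdir into a low-level _mkdir plus a wrapper keeps the recursion readable: 405/406 means the directory already exists, 409 Conflict means the parent is missing, so the wrapper creates the parent chain and tries once more. A standalone sketch with the status handling reduced to sentinel errors and the MKCOL-style call passed in as a function:

package sketch

import (
    "errors"
    "path"
)

var (
    errExists        = errors.New("already exists") // like 405/406
    errParentMissing = errors.New("parent missing") // like 409 Conflict
)

// mkdir retries after creating missing parents; _mkdir is the single
// non-recursive directory-creation call.
func mkdir(dirPath string, _mkdir func(string) error) error {
    err := _mkdir(dirPath)
    switch {
    case errors.Is(err, errExists):
        return nil
    case errors.Is(err, errParentMissing):
        parent := path.Dir(dirPath)
        if parent != "." && parent != "/" {
            if err := mkdir(parent, _mkdir); err != nil {
                return err
            }
        }
        return _mkdir(dirPath)
    }
    return err
}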
@@ -968,6 +970,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
         Body:          in,
         NoResponse:    true,
         ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
+        ContentType:   fs.MimeType(src),
     }
     if o.fs.useOCMtime {
         opts.ExtraHeaders = map[string]string{
@@ -1,34 +0,0 @@
package src

//from yadisk

import (
    "io"
    "net/http"
)

//RootAddr is the base URL for Yandex Disk API.
const RootAddr = "https://cloud-api.yandex.com" //also https://cloud-api.yandex.net and https://cloud-api.yandex.ru

func (c *Client) setRequestScope(req *http.Request) {
    req.Header.Add("Accept", "application/json")
    req.Header.Add("Content-Type", "application/json")
    req.Header.Add("Authorization", "OAuth "+c.token)
}

func (c *Client) scopedRequest(method, urlPath string, body io.Reader) (*http.Request, error) {
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    req, err := http.NewRequest(method, fullURL, body)
    if err != nil {
        return req, err
    }

    c.setRequestScope(req)
    return req, nil
}
@@ -1,133 +0,0 @@
package src

import (
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"

    "github.com/pkg/errors"
)

//Client struct
type Client struct {
    token      string
    basePath   string
    HTTPClient *http.Client
}

//NewClient creates new client
func NewClient(token string, client ...*http.Client) *Client {
    return newClientInternal(
        token,
        "https://cloud-api.yandex.com/v1/disk", //also "https://cloud-api.yandex.net/v1/disk" "https://cloud-api.yandex.ru/v1/disk"
        client...)
}

func newClientInternal(token string, basePath string, client ...*http.Client) *Client {
    c := &Client{
        token:    token,
        basePath: basePath,
    }
    if len(client) != 0 {
        c.HTTPClient = client[0]
    } else {
        c.HTTPClient = http.DefaultClient
    }
    return c
}

//ErrorHandler type
type ErrorHandler func(*http.Response) error

var defaultErrorHandler ErrorHandler = func(resp *http.Response) error {
    if resp.StatusCode/100 == 5 {
        return errors.New("server error")
    }

    if resp.StatusCode/100 == 4 {
        var response DiskClientError
        contents, _ := ioutil.ReadAll(resp.Body)
        err := json.Unmarshal(contents, &response)
        if err != nil {
            return err
        }
        return response
    }

    if resp.StatusCode/100 == 3 {
        return errors.New("redirect error")
    }
    return nil
}

func (HTTPRequest *HTTPRequest) run(client *Client) ([]byte, error) {
    var err error
    values := make(url.Values)
    for k, v := range HTTPRequest.Parameters {
        values.Set(k, fmt.Sprintf("%v", v))
    }

    var req *http.Request
    if HTTPRequest.Method == "POST" {
        // TODO json serialize
        req, err = http.NewRequest(
            "POST",
            client.basePath+HTTPRequest.Path,
            strings.NewReader(values.Encode()))
        if err != nil {
            return nil, err
        }
        // TODO
        // req.Header.Set("Content-Type", "application/json")
    } else {
        req, err = http.NewRequest(
            HTTPRequest.Method,
            client.basePath+HTTPRequest.Path+"?"+values.Encode(),
            nil)
        if err != nil {
            return nil, err
        }
    }

    for headerName := range HTTPRequest.Headers {
        var headerValues = HTTPRequest.Headers[headerName]
        for _, headerValue := range headerValues {
            req.Header.Set(headerName, headerValue)
        }
    }
    return runRequest(client, req)
}

func runRequest(client *Client, req *http.Request) ([]byte, error) {
    return runRequestWithErrorHandler(client, req, defaultErrorHandler)
}

func runRequestWithErrorHandler(client *Client, req *http.Request, errorHandler ErrorHandler) (out []byte, err error) {
    resp, err := client.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer CheckClose(resp.Body, &err)

    return checkResponseForErrorsWithErrorHandler(resp, errorHandler)
}

func checkResponseForErrorsWithErrorHandler(resp *http.Response, errorHandler ErrorHandler) ([]byte, error) {
    if resp.StatusCode/100 > 2 {
        return nil, errorHandler(resp)
    }
    return ioutil.ReadAll(resp.Body)
}

// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
    cerr := c.Close()
    if *err == nil {
        *err = cerr
    }
}
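CheckClose, removed along with this file, is the usual Go trick for not losing a Close error from a defer: it writes the close error into the function's named return only when nothing has failed yet. A minimal usage sketch of the same pattern:

package sketch

import (
    "io"
    "os"
)

// checkClose mirrors the removed helper: propagate Close's error only
// when the surrounding function would otherwise return nil.
func checkClose(c io.Closer, err *error) {
    if cerr := c.Close(); *err == nil {
        *err = cerr
    }
}

// readAll shows the pattern: the named return err lets the deferred
// checkClose overwrite a nil result with a close failure.
func readAll(name string) (data []byte, err error) {
    f, err := os.Open(name)
    if err != nil {
        return nil, err
    }
    defer checkClose(f, &err)
    return io.ReadAll(f)
}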
@@ -1,51 +0,0 @@
package src

import (
    "bytes"
    "encoding/json"
    "io"
    "net/url"
)

//CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
    CustomProperties map[string]interface{} `json:"custom_properties"`
}

//SetCustomProperty will set specified data from Yandex Disk
func (c *Client) SetCustomProperty(remotePath string, property string, value string) error {
    rcm := map[string]interface{}{
        property: value,
    }
    cpr := CustomPropertyResponse{rcm}
    data, _ := json.Marshal(cpr)
    body := bytes.NewReader(data)
    err := c.SetCustomPropertyRequest(remotePath, body)
    if err != nil {
        return err
    }
    return err
}

//SetCustomPropertyRequest will make an CustomProperty request and return a URL to CustomProperty data to.
func (c *Client) SetCustomPropertyRequest(remotePath string, body io.Reader) (err error) {
    values := url.Values{}
    values.Add("path", remotePath)
    req, err := c.scopedRequest("PATCH", "/v1/disk/resources?"+values.Encode(), body)
    if err != nil {
        return err
    }

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }
    if err := CheckAPIError(resp); err != nil {
        return err
    }
    defer CheckClose(resp.Body, &err)

    //If needed we can read response and check if custom_property is set.

    return nil
}
@@ -1,23 +0,0 @@
package src

import (
    "net/url"
    "strconv"
)

// Delete will remove specified file/folder from Yandex Disk
func (c *Client) Delete(remotePath string, permanently bool) error {

    values := url.Values{}
    values.Add("permanently", strconv.FormatBool(permanently))
    values.Add("path", remotePath)
    urlPath := "/v1/disk/resources?" + values.Encode()
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    return c.PerformDelete(fullURL)
}
@@ -1,48 +0,0 @@
package src

import "encoding/json"

//DiskInfoRequest type
type DiskInfoRequest struct {
    client      *Client
    HTTPRequest *HTTPRequest
}

func (req *DiskInfoRequest) request() *HTTPRequest {
    return req.HTTPRequest
}

//DiskInfoResponse struct is returned by the API for DiskInfo request.
type DiskInfoResponse struct {
    TrashSize     uint64            `json:"TrashSize"`
    TotalSpace    uint64            `json:"TotalSpace"`
    UsedSpace     uint64            `json:"UsedSpace"`
    SystemFolders map[string]string `json:"SystemFolders"`
}

//NewDiskInfoRequest create new DiskInfo Request
func (c *Client) NewDiskInfoRequest() *DiskInfoRequest {
    return &DiskInfoRequest{
        client:      c,
        HTTPRequest: createGetRequest(c, "/", nil),
    }
}

//Exec run DiskInfo Request
func (req *DiskInfoRequest) Exec() (*DiskInfoResponse, error) {
    data, err := req.request().run(req.client)
    if err != nil {
        return nil, err
    }

    var info DiskInfoResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if info.SystemFolders == nil {
        info.SystemFolders = make(map[string]string)
    }

    return &info, nil
}
@@ -1,66 +0,0 @@
package src

import (
    "encoding/json"
    "io"
    "net/url"
)

// DownloadResponse struct is returned by the API for Download request.
type DownloadResponse struct {
    HRef      string `json:"href"`
    Method    string `json:"method"`
    Templated bool   `json:"templated"`
}

// Download will get specified data from Yandex.Disk supplying the extra headers
func (c *Client) Download(remotePath string, headers map[string]string) (io.ReadCloser, error) { //io.Writer
    ur, err := c.DownloadRequest(remotePath)
    if err != nil {
        return nil, err
    }
    return c.PerformDownload(ur.HRef, headers)
}

// DownloadRequest will make an download request and return a URL to download data to.
func (c *Client) DownloadRequest(remotePath string) (ur *DownloadResponse, err error) {
    values := url.Values{}
    values.Add("path", remotePath)

    req, err := c.scopedRequest("GET", "/v1/disk/resources/download?"+values.Encode(), nil)
    if err != nil {
        return nil, err
    }

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }
    if err := CheckAPIError(resp); err != nil {
        return nil, err
    }
    defer CheckClose(resp.Body, &err)

    ur, err = ParseDownloadResponse(resp.Body)
    if err != nil {
        return nil, err
    }

    return ur, nil
}

// ParseDownloadResponse tries to read and parse DownloadResponse struct.
func ParseDownloadResponse(data io.Reader) (*DownloadResponse, error) {
    dec := json.NewDecoder(data)
    var ur DownloadResponse

    if err := dec.Decode(&ur); err == io.EOF {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &ur, nil
}
@@ -1,9 +0,0 @@
package src

// EmptyTrash will permanently delete all trashed files/folders from Yandex Disk
func (c *Client) EmptyTrash() error {
    fullURL := RootAddr
    fullURL += "/v1/disk/trash/resources"

    return c.PerformDelete(fullURL)
}
@@ -1,84 +0,0 @@
package src

//from yadisk

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

// ErrorResponse represents erroneous API response.
// Implements go's built in `error`.
type ErrorResponse struct {
    ErrorName   string `json:"error"`
    Description string `json:"description"`
    Message     string `json:"message"`

    StatusCode int `json:""`
}

func (e *ErrorResponse) Error() string {
    return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}

// ProccessErrorResponse tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorResponse(data io.Reader) (*ErrorResponse, error) {
    dec := json.NewDecoder(data)
    var errorResponse ErrorResponse

    if err := dec.Decode(&errorResponse); err == io.EOF {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &errorResponse, nil
}

// CheckAPIError is a convenient function to turn erroneous
// API response into go error. It closes the Body on error.
func CheckAPIError(resp *http.Response) (err error) {
    if resp.StatusCode >= 200 && resp.StatusCode < 400 {
        return nil
    }

    defer CheckClose(resp.Body, &err)

    errorResponse, err := ProccessErrorResponse(resp.Body)
    if err != nil {
        return err
    }
    errorResponse.StatusCode = resp.StatusCode

    return errorResponse
}

// ProccessErrorString tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorString(data string) (*ErrorResponse, error) {
    var errorResponse ErrorResponse
    if err := json.Unmarshal([]byte(data), &errorResponse); err == nil {
        // ok
    } else if err != nil {
        return nil, err
    }

    // TODO: check if there is any trash data after JSON and crash if there is.

    return &errorResponse, nil
}

// ParseAPIError Parse json error response from API
func (c *Client) ParseAPIError(jsonErr string) (string, error) { //ErrorName
    errorResponse, err := ProccessErrorString(jsonErr)
    if err != nil {
        return err.Error(), err
    }

    return errorResponse.ErrorName, nil
}
@@ -1,14 +0,0 @@
package src

import "encoding/json"

//DiskClientError struct
type DiskClientError struct {
    Description string `json:"Description"`
    Code        string `json:"Error"`
}

func (e DiskClientError) Error() string {
    b, _ := json.Marshal(e)
    return string(b)
}
@@ -1,8 +0,0 @@
package src

// FilesResourceListResponse struct is returned by the API for requests.
type FilesResourceListResponse struct {
    Items  []ResourceInfoResponse `json:"items"`
    Limit  *uint64                `json:"limit"`
    Offset *uint64                `json:"offset"`
}
@@ -1,78 +0,0 @@
package src

import (
    "encoding/json"
    "strings"
)

// FlatFileListRequest struct client for FlatFileList Request
type FlatFileListRequest struct {
    client      *Client
    HTTPRequest *HTTPRequest
}

// FlatFileListRequestOptions struct - options for request
type FlatFileListRequestOptions struct {
    MediaType   []MediaType
    Limit       *uint32
    Offset      *uint32
    Fields      []string
    PreviewSize *PreviewSize
    PreviewCrop *bool
}

// Request get request
func (req *FlatFileListRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewFlatFileListRequest create new FlatFileList Request
func (c *Client) NewFlatFileListRequest(options ...FlatFileListRequestOptions) *FlatFileListRequest {
    var parameters = make(map[string]interface{})
    if len(options) > 0 {
        opt := options[0]
        if opt.Limit != nil {
            parameters["limit"] = *opt.Limit
        }
        if opt.Offset != nil {
            parameters["offset"] = *opt.Offset
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = *opt.PreviewCrop
        }
        if opt.MediaType != nil {
            var strMediaTypes = make([]string, len(opt.MediaType))
            for i, t := range opt.MediaType {
                strMediaTypes[i] = t.String()
            }
            parameters["media_type"] = strings.Join(strMediaTypes, ",")
        }
    }
    return &FlatFileListRequest{
        client:      c,
        HTTPRequest: createGetRequest(c, "/resources/files", parameters),
    }
}

// Exec run FlatFileList Request
func (req *FlatFileListRequest) Exec() (*FilesResourceListResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }
    var info FilesResourceListResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if cap(info.Items) == 0 {
        info.Items = []ResourceInfoResponse{}
    }
    return &info, nil
}
@@ -1,24 +0,0 @@
package src

// HTTPRequest struct
type HTTPRequest struct {
    Method     string
    Path       string
    Parameters map[string]interface{}
    Headers    map[string][]string
}

func createGetRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
    return createRequest(client, "GET", path, params)
}

func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
    var headers = make(map[string][]string)
    headers["Authorization"] = []string{"OAuth " + client.token}
    return &HTTPRequest{
        Method:     method,
        Path:       path,
        Parameters: parameters,
        Headers:    headers,
    }
}
@@ -1,7 +0,0 @@
package src

// LastUploadedResourceListResponse struct
type LastUploadedResourceListResponse struct {
    Items []ResourceInfoResponse `json:"items"`
    Limit *uint64                `json:"limit"`
}
@@ -1,74 +0,0 @@
package src

import (
    "encoding/json"
    "strings"
)

// LastUploadedResourceListRequest struct
type LastUploadedResourceListRequest struct {
    client      *Client
    HTTPRequest *HTTPRequest
}

// LastUploadedResourceListRequestOptions struct
type LastUploadedResourceListRequestOptions struct {
    MediaType   []MediaType
    Limit       *uint32
    Fields      []string
    PreviewSize *PreviewSize
    PreviewCrop *bool
}

// Request return request
func (req *LastUploadedResourceListRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewLastUploadedResourceListRequest create new LastUploadedResourceList Request
func (c *Client) NewLastUploadedResourceListRequest(options ...LastUploadedResourceListRequestOptions) *LastUploadedResourceListRequest {
    var parameters = make(map[string]interface{})
    if len(options) > 0 {
        opt := options[0]
        if opt.Limit != nil {
            parameters["limit"] = opt.Limit
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = opt.PreviewCrop
        }
        if opt.MediaType != nil {
            var strMediaTypes = make([]string, len(opt.MediaType))
            for i, t := range opt.MediaType {
                strMediaTypes[i] = t.String()
            }
            parameters["media_type"] = strings.Join(strMediaTypes, ",")
        }
    }
    return &LastUploadedResourceListRequest{
        client:      c,
        HTTPRequest: createGetRequest(c, "/resources/last-uploaded", parameters),
    }
}

// Exec run LastUploadedResourceList Request
func (req *LastUploadedResourceListRequest) Exec() (*LastUploadedResourceListResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }
    var info LastUploadedResourceListResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if cap(info.Items) == 0 {
        info.Items = []ResourceInfoResponse{}
    }
    return &info, nil
}
@@ -1,144 +0,0 @@
package src

// MediaType struct - media types
type MediaType struct {
    mediaType string
}

// Audio - media type
func (m *MediaType) Audio() *MediaType {
    return &MediaType{
        mediaType: "audio",
    }
}

// Backup - media type
func (m *MediaType) Backup() *MediaType {
    return &MediaType{
        mediaType: "backup",
    }
}

// Book - media type
func (m *MediaType) Book() *MediaType {
    return &MediaType{
        mediaType: "book",
    }
}

// Compressed - media type
func (m *MediaType) Compressed() *MediaType {
    return &MediaType{
        mediaType: "compressed",
    }
}

// Data - media type
func (m *MediaType) Data() *MediaType {
    return &MediaType{
        mediaType: "data",
    }
}

// Development - media type
func (m *MediaType) Development() *MediaType {
    return &MediaType{
        mediaType: "development",
    }
}

// Diskimage - media type
func (m *MediaType) Diskimage() *MediaType {
    return &MediaType{
        mediaType: "diskimage",
    }
}

// Document - media type
func (m *MediaType) Document() *MediaType {
    return &MediaType{
        mediaType: "document",
    }
}

// Encoded - media type
func (m *MediaType) Encoded() *MediaType {
    return &MediaType{
        mediaType: "encoded",
    }
}

// Executable - media type
func (m *MediaType) Executable() *MediaType {
    return &MediaType{
        mediaType: "executable",
    }
}

// Flash - media type
func (m *MediaType) Flash() *MediaType {
    return &MediaType{
        mediaType: "flash",
    }
}

// Font - media type
func (m *MediaType) Font() *MediaType {
    return &MediaType{
        mediaType: "font",
    }
}

// Image - media type
func (m *MediaType) Image() *MediaType {
    return &MediaType{
        mediaType: "image",
    }
}

// Settings - media type
func (m *MediaType) Settings() *MediaType {
    return &MediaType{
        mediaType: "settings",
    }
}

// Spreadsheet - media type
func (m *MediaType) Spreadsheet() *MediaType {
    return &MediaType{
        mediaType: "spreadsheet",
    }
}

// Text - media type
func (m *MediaType) Text() *MediaType {
    return &MediaType{
        mediaType: "text",
    }
}

// Unknown - media type
func (m *MediaType) Unknown() *MediaType {
    return &MediaType{
        mediaType: "unknown",
    }
}

// Video - media type
func (m *MediaType) Video() *MediaType {
    return &MediaType{
        mediaType: "video",
    }
}

// Web - media type
func (m *MediaType) Web() *MediaType {
    return &MediaType{
        mediaType: "web",
    }
}

// String - media type
func (m *MediaType) String() string {
    return m.mediaType
}
@@ -1,21 +0,0 @@
package src

import (
    "net/url"
)

// Mkdir will make specified folder on Yandex Disk
func (c *Client) Mkdir(remotePath string) (int, string, error) {

    values := url.Values{}
    values.Add("path", remotePath) // only one current folder will be created. Not all the folders in the path.
    urlPath := "/v1/disk/resources?" + values.Encode()
    fullURL := RootAddr
    if urlPath[:1] != "/" {
        fullURL += "/" + urlPath
    } else {
        fullURL += urlPath
    }

    return c.PerformMkdir(fullURL)
}
@@ -1,35 +0,0 @@
package src

import (
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformDelete does the actual delete via DELETE request.
func (c *Client) PerformDelete(url string) error {
    req, err := http.NewRequest("DELETE", url, nil)
    if err != nil {
        return err
    }

    //set access token and headers
    c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }

    //204 - resource deleted.
    //202 - folder not empty, content will be deleted soon (async delete).
    if resp.StatusCode != 204 && resp.StatusCode != 202 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return err
        }
        return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body))
    }
    return nil
}
@@ -1,40 +0,0 @@
package src

import (
    "io"
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformDownload does the actual download via unscoped GET request.
func (c *Client) PerformDownload(url string, headers map[string]string) (out io.ReadCloser, err error) {
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }

    // Set any extra headers
    for k, v := range headers {
        req.Header.Set(k, v)
    }

    //c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return nil, err
    }

    _, isRanging := req.Header["Range"]
    if !(resp.StatusCode == http.StatusOK || (isRanging && resp.StatusCode == http.StatusPartialContent)) {
        defer CheckClose(resp.Body, &err)
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return nil, err
        }
        return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body))
    }
    return resp.Body, err
}
@@ -1,34 +0,0 @@
package src

import (
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformMkdir does the actual mkdir via PUT request.
func (c *Client) PerformMkdir(url string) (int, string, error) {
    req, err := http.NewRequest("PUT", url, nil)
    if err != nil {
        return 0, "", err
    }

    //set access token and headers
    c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return 0, "", err
    }

    if resp.StatusCode != 201 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return 0, "", err
        }
        //third parameter is the json error response body
        return resp.StatusCode, string(body), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body))
    }
    return resp.StatusCode, "", nil
}
@@ -1,38 +0,0 @@
package src

//from yadisk

import (
    "io"
    "io/ioutil"
    "net/http"

    "github.com/pkg/errors"
)

// PerformUpload does the actual upload via unscoped PUT request.
func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (err error) {
    req, err := http.NewRequest("PUT", url, data)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", contentType)

    //c.setRequestScope(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return err
    }
    defer CheckClose(resp.Body, &err)

    if resp.StatusCode != 201 {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return err
        }

        return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body))
    }
    return nil
}
@@ -1,75 +0,0 @@
package src

import "fmt"

// PreviewSize struct
type PreviewSize struct {
    size string
}

// PredefinedSizeS - set preview size
func (s *PreviewSize) PredefinedSizeS() *PreviewSize {
    return &PreviewSize{
        size: "S",
    }
}

// PredefinedSizeM - set preview size
func (s *PreviewSize) PredefinedSizeM() *PreviewSize {
    return &PreviewSize{
        size: "M",
    }
}

// PredefinedSizeL - set preview size
func (s *PreviewSize) PredefinedSizeL() *PreviewSize {
    return &PreviewSize{
        size: "L",
    }
}

// PredefinedSizeXL - set preview size
func (s *PreviewSize) PredefinedSizeXL() *PreviewSize {
    return &PreviewSize{
        size: "XL",
    }
}

// PredefinedSizeXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXL() *PreviewSize {
    return &PreviewSize{
        size: "XXL",
    }
}

// PredefinedSizeXXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXXL() *PreviewSize {
    return &PreviewSize{
        size: "XXXL",
    }
}

// ExactWidth - set preview size
func (s *PreviewSize) ExactWidth(width uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("%dx", width),
    }
}

// ExactHeight - set preview size
func (s *PreviewSize) ExactHeight(height uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("x%d", height),
    }
}

// ExactSize - set preview size
func (s *PreviewSize) ExactSize(width uint32, height uint32) *PreviewSize {
    return &PreviewSize{
        size: fmt.Sprintf("%dx%d", width, height),
    }
}

func (s *PreviewSize) String() string {
    return s.size
}
@@ -1,19 +0,0 @@
package src

//ResourceInfoResponse struct is returned by the API for metedata requests.
type ResourceInfoResponse struct {
    PublicKey        string                 `json:"public_key"`
    Name             string                 `json:"name"`
    Created          string                 `json:"created"`
    CustomProperties map[string]interface{} `json:"custom_properties"`
    Preview          string                 `json:"preview"`
    PublicURL        string                 `json:"public_url"`
    OriginPath       string                 `json:"origin_path"`
    Modified         string                 `json:"modified"`
    Path             string                 `json:"path"`
    Md5              string                 `json:"md5"`
    ResourceType     string                 `json:"type"`
    MimeType         string                 `json:"mime_type"`
    Size             uint64                 `json:"size"`
    Embedded         *ResourceListResponse  `json:"_embedded"`
}
@@ -1,45 +0,0 @@
package src

import "encoding/json"

// ResourceInfoRequest struct
type ResourceInfoRequest struct {
    client      *Client
    HTTPRequest *HTTPRequest
}

// Request of ResourceInfoRequest
func (req *ResourceInfoRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewResourceInfoRequest create new ResourceInfo Request
func (c *Client) NewResourceInfoRequest(path string, options ...ResourceInfoRequestOptions) *ResourceInfoRequest {
    return &ResourceInfoRequest{
        client:      c,
        HTTPRequest: createResourceInfoRequest(c, "/resources", path, options...),
    }
}

// Exec run ResourceInfo Request
func (req *ResourceInfoRequest) Exec() (*ResourceInfoResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }

    var info ResourceInfoResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if info.CustomProperties == nil {
        info.CustomProperties = make(map[string]interface{})
    }
    if info.Embedded != nil {
        if cap(info.Embedded.Items) == 0 {
            info.Embedded.Items = []ResourceInfoResponse{}
        }
    }
    return &info, nil
}
@@ -1,33 +0,0 @@
package src

import "strings"

func createResourceInfoRequest(c *Client,
    apiPath string,
    path string,
    options ...ResourceInfoRequestOptions) *HTTPRequest {
    var parameters = make(map[string]interface{})
    parameters["path"] = path
    if len(options) > 0 {
        opt := options[0]
        if opt.SortMode != nil {
            parameters["sort"] = opt.SortMode.String()
        }
        if opt.Limit != nil {
            parameters["limit"] = *opt.Limit
        }
        if opt.Offset != nil {
            parameters["offset"] = *opt.Offset
        }
        if opt.Fields != nil {
            parameters["fields"] = strings.Join(opt.Fields, ",")
        }
        if opt.PreviewSize != nil {
            parameters["preview_size"] = opt.PreviewSize.String()
        }
        if opt.PreviewCrop != nil {
            parameters["preview_crop"] = *opt.PreviewCrop
        }
    }
    return createGetRequest(c, apiPath, parameters)
}
@@ -1,11 +0,0 @@
package src

// ResourceInfoRequestOptions struct
type ResourceInfoRequestOptions struct {
    SortMode    *SortMode
    Limit       *uint32
    Offset      *uint32
    Fields      []string
    PreviewSize *PreviewSize
    PreviewCrop *bool
}
@@ -1,12 +0,0 @@
package src

// ResourceListResponse struct
type ResourceListResponse struct {
    Sort      *SortMode              `json:"sort"`
    PublicKey string                 `json:"public_key"`
    Items     []ResourceInfoResponse `json:"items"`
    Path      string                 `json:"path"`
    Limit     *uint64                `json:"limit"`
    Offset    *uint64                `json:"offset"`
    Total     *uint64                `json:"total"`
}
@@ -1,79 +0,0 @@
package src

import "strings"

// SortMode struct - sort mode
type SortMode struct {
    mode string
}

// Default - sort mode
func (m *SortMode) Default() *SortMode {
    return &SortMode{
        mode: "",
    }
}

// ByName - sort mode
func (m *SortMode) ByName() *SortMode {
    return &SortMode{
        mode: "name",
    }
}

// ByPath - sort mode
func (m *SortMode) ByPath() *SortMode {
    return &SortMode{
        mode: "path",
    }
}

// ByCreated - sort mode
func (m *SortMode) ByCreated() *SortMode {
    return &SortMode{
        mode: "created",
    }
}

// ByModified - sort mode
func (m *SortMode) ByModified() *SortMode {
    return &SortMode{
        mode: "modified",
    }
}

// BySize - sort mode
func (m *SortMode) BySize() *SortMode {
    return &SortMode{
        mode: "size",
    }
}

// Reverse - sort mode
func (m *SortMode) Reverse() *SortMode {
    if strings.HasPrefix(m.mode, "-") {
        return &SortMode{
            mode: m.mode[1:],
        }
    }
    return &SortMode{
        mode: "-" + m.mode,
    }
}

func (m *SortMode) String() string {
    return m.mode
}

// UnmarshalJSON sort mode
func (m *SortMode) UnmarshalJSON(value []byte) error {
    if value == nil || len(value) == 0 {
        m.mode = ""
        return nil
    }
    m.mode = string(value)
    if strings.HasPrefix(m.mode, "\"") && strings.HasSuffix(m.mode, "\"") {
        m.mode = m.mode[1 : len(m.mode)-1]
    }
    return nil
}
@@ -1,45 +0,0 @@
package src

import "encoding/json"

// TrashResourceInfoRequest struct
type TrashResourceInfoRequest struct {
    client      *Client
    HTTPRequest *HTTPRequest
}

// Request of TrashResourceInfoRequest struct
func (req *TrashResourceInfoRequest) Request() *HTTPRequest {
    return req.HTTPRequest
}

// NewTrashResourceInfoRequest create new TrashResourceInfo Request
func (c *Client) NewTrashResourceInfoRequest(path string, options ...ResourceInfoRequestOptions) *TrashResourceInfoRequest {
    return &TrashResourceInfoRequest{
        client:      c,
        HTTPRequest: createResourceInfoRequest(c, "/trash/resources", path, options...),
    }
}

// Exec run TrashResourceInfo Request
func (req *TrashResourceInfoRequest) Exec() (*ResourceInfoResponse, error) {
    data, err := req.Request().run(req.client)
    if err != nil {
        return nil, err
    }

    var info ResourceInfoResponse
    err = json.Unmarshal(data, &info)
    if err != nil {
        return nil, err
    }
    if info.CustomProperties == nil {
        info.CustomProperties = make(map[string]interface{})
    }
    if info.Embedded != nil {
        if cap(info.Embedded.Items) == 0 {
            info.Embedded.Items = []ResourceInfoResponse{}
        }
    }
    return &info, nil
}
157 backend/yandex/api/types.go Normal file
@@ -0,0 +1,157 @@
package api

import (
	"fmt"
	"strings"
)

// DiskInfo contains disk metadata
type DiskInfo struct {
	TotalSpace int64 `json:"total_space"`
	UsedSpace  int64 `json:"used_space"`
	TrashSize  int64 `json:"trash_size"`
}
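
A quick sketch of decoding this struct from the JSON the API returns (the literal values are illustrative only; assumes encoding/json is imported):

	var di DiskInfo
	err := json.Unmarshal([]byte(`{"total_space":1099511627776,"used_space":395551989,"trash_size":0}`), &di)
	if err == nil {
		// Free space follows directly from the two fields above.
		fmt.Printf("free: %d bytes\n", di.TotalSpace-di.UsedSpace)
	}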

// ResourceInfoRequestOptions struct
type ResourceInfoRequestOptions struct {
	SortMode *SortMode
	Limit    uint64
	Offset   uint64
	Fields   []string
}

// ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
	PublicKey        string                 `json:"public_key"`
	Name             string                 `json:"name"`
	Created          string                 `json:"created"`
	CustomProperties map[string]interface{} `json:"custom_properties"`
	Preview          string                 `json:"preview"`
	PublicURL        string                 `json:"public_url"`
	OriginPath       string                 `json:"origin_path"`
	Modified         string                 `json:"modified"`
	Path             string                 `json:"path"`
	Md5              string                 `json:"md5"`
	ResourceType     string                 `json:"type"`
	MimeType         string                 `json:"mime_type"`
	Size             int64                  `json:"size"`
	Embedded         *ResourceListResponse  `json:"_embedded"`
}

// ResourceListResponse struct
type ResourceListResponse struct {
	Sort      *SortMode              `json:"sort"`
	PublicKey string                 `json:"public_key"`
	Items     []ResourceInfoResponse `json:"items"`
	Path      string                 `json:"path"`
	Limit     *uint64                `json:"limit"`
	Offset    *uint64                `json:"offset"`
	Total     *uint64                `json:"total"`
}

// AsyncInfo struct is returned by the API for various async operations.
type AsyncInfo struct {
	HRef      string `json:"href"`
	Method    string `json:"method"`
	Templated bool   `json:"templated"`
}

// AsyncStatus is returned when requesting the status of an async
// operation. Possible values: in-progress, success, failure.
type AsyncStatus struct {
	Status string `json:"status"`
}
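
A hedged sketch of how a caller might poll such a status. Everything here beyond the three status strings in the comment above is an assumption: fetchStatus is a hypothetical helper, and the time import is presumed available.

	// pollUntilDone is a sketch, not part of this API. fetchStatus is a
	// hypothetical helper returning (*AsyncStatus, error).
	func pollUntilDone(fetchStatus func() (*AsyncStatus, error)) error {
		for {
			st, err := fetchStatus()
			if err != nil {
				return err
			}
			switch st.Status {
			case "success":
				return nil
			case "failure":
				return fmt.Errorf("async operation failed")
			}
			// Still "in-progress": wait briefly and poll again.
			time.Sleep(time.Second)
		}
	}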

// CustomPropertyResponse struct is sent to and returned by the API for
// CustomProperty requests.
type CustomPropertyResponse struct {
	CustomProperties map[string]interface{} `json:"custom_properties"`
}

// SortMode struct - sort mode
type SortMode struct {
	mode string
}

// Default - sort mode
func (m *SortMode) Default() *SortMode {
	return &SortMode{
		mode: "",
	}
}

// ByName - sort mode
func (m *SortMode) ByName() *SortMode {
	return &SortMode{
		mode: "name",
	}
}

// ByPath - sort mode
func (m *SortMode) ByPath() *SortMode {
	return &SortMode{
		mode: "path",
	}
}

// ByCreated - sort mode
func (m *SortMode) ByCreated() *SortMode {
	return &SortMode{
		mode: "created",
	}
}

// ByModified - sort mode
func (m *SortMode) ByModified() *SortMode {
	return &SortMode{
		mode: "modified",
	}
}

// BySize - sort mode
func (m *SortMode) BySize() *SortMode {
	return &SortMode{
		mode: "size",
	}
}

// Reverse - sort mode
func (m *SortMode) Reverse() *SortMode {
	if strings.HasPrefix(m.mode, "-") {
		return &SortMode{
			mode: m.mode[1:],
		}
	}
	return &SortMode{
		mode: "-" + m.mode,
	}
}

func (m *SortMode) String() string {
	return m.mode
}

// UnmarshalJSON sort mode
func (m *SortMode) UnmarshalJSON(value []byte) error {
	if value == nil || len(value) == 0 {
		m.mode = ""
		return nil
	}
	m.mode = string(value)
	if strings.HasPrefix(m.mode, "\"") && strings.HasSuffix(m.mode, "\"") {
		m.mode = m.mode[1 : len(m.mode)-1]
	}
	return nil
}
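
A minimal sketch of the builder-style API above (run inside the api package; the outputs follow directly from Reverse's "-" prefix handling):

	mode := (&SortMode{}).ByModified().Reverse()
	fmt.Println(mode.String())           // "-modified", i.e. descending by modification time
	fmt.Println(mode.Reverse().String()) // reversing twice restores "modified"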

// ErrorResponse represents an erroneous API response.
// Implements Go's built-in `error`.
type ErrorResponse struct {
	ErrorName   string `json:"error"`
	Description string `json:"description"`
	Message     string `json:"message"`

	StatusCode int `json:""`
}

func (e *ErrorResponse) Error() string {
	return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}
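
For context, a hedged sketch of recovering the structured error at a call site (the err value is assumed to come back from one of the API helpers; the type assertion is plain Go):

	if apiErr, ok := err.(*ErrorResponse); ok {
		// The structured fields are available as well as the formatted string.
		fmt.Printf("HTTP %d: %s - %s\n", apiErr.StatusCode, apiErr.ErrorName, apiErr.Description)
	}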
@@ -1,71 +0,0 @@
package src

//from yadisk

import (
	"encoding/json"
	"io"
	"net/url"
	"strconv"
)

// UploadResponse struct is returned by the API for upload request.
type UploadResponse struct {
	HRef      string `json:"href"`
	Method    string `json:"method"`
	Templated bool   `json:"templated"`
}

// Upload will put specified data to Yandex.Disk.
func (c *Client) Upload(data io.Reader, remotePath string, overwrite bool, contentType string) error {
	ur, err := c.UploadRequest(remotePath, overwrite)
	if err != nil {
		return err
	}

	return c.PerformUpload(ur.HRef, data, contentType)
}

// UploadRequest will make an upload request and return a URL to upload data to.
func (c *Client) UploadRequest(remotePath string, overwrite bool) (ur *UploadResponse, err error) {
	values := url.Values{}
	values.Add("path", remotePath)
	values.Add("overwrite", strconv.FormatBool(overwrite))

	req, err := c.scopedRequest("GET", "/v1/disk/resources/upload?"+values.Encode(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	if err := CheckAPIError(resp); err != nil {
		return nil, err
	}
	defer CheckClose(resp.Body, &err)

	ur, err = ParseUploadResponse(resp.Body)
	if err != nil {
		return nil, err
	}

	return ur, nil
}

// ParseUploadResponse tries to read and parse UploadResponse struct.
func ParseUploadResponse(data io.Reader) (*UploadResponse, error) {
	dec := json.NewDecoder(data)
	var ur UploadResponse

	if err := dec.Decode(&ur); err == io.EOF {
		// ok
	} else if err != nil {
		return nil, err
	}

	// TODO: check if there is any trash data after JSON and crash if there is.

	return &ur, nil
}
File diff suppressed because it is too large

5 bin/build-xgo-cgofuse.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/bash
set -e
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
docker images
docker push rclone/xgo-cgofuse
@@ -63,7 +63,9 @@ var osarches = []string{

// Special environment flags for a given arch
var archFlags = map[string][]string{
	"386":    {"GO386=387"},
	"mips":   {"GOMIPS=softfloat"},
	"mipsle": {"GOMIPS=softfloat"},
}

// runEnv - run a shell command with env
@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2
"""
Make backend documentation
"""
@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
@@ -4,18 +4,20 @@
set -e

go install
mkdir -p /tmp/rclone_cache_test
mkdir -p /tmp/rclone/cache_test
mkdir -p /tmp/rclone/rc_mount
export RCLONE_CONFIG_RCDOCS_TYPE=cache
export RCLONE_CONFIG_RCDOCS_REMOTE=/tmp/rclone/cache_test
rclone -q --rc mount rcdocs: /mnt/tmp/ &
rclone -q --rc mount rcdocs: /tmp/rclone/rc_mount &
sleep 0.5
rclone rc > /tmp/z.md
fusermount -z -u /mnt/tmp/
rclone rc > /tmp/rclone/z.md
fusermount -u -z /tmp/rclone/rc_mount > /dev/null 2>&1 || umount /tmp/rclone/rc_mount

awk '
BEGIN {p=1}
/^<!--- autogenerated start/ {print;system("cat /tmp/z.md");p=0}
/^<!--- autogenerated start/ {print;system("cat /tmp/rclone/z.md");p=0}
/^<!--- autogenerated stop/ {p=1}
p' docs/content/rc.md > /tmp/rc.md
p' docs/content/rc.md > /tmp/rclone/rc.md

mv /tmp/rc.md docs/content/rc.md
mv /tmp/rclone/rc.md docs/content/rc.md
rm -rf /tmp/rclone
59 bin/test_independence.go Normal file
@@ -0,0 +1,59 @@
// +build ignore

// Test that the tests in the suite passed in are independent

package main

import (
	"flag"
	"log"
	"os"
	"os/exec"
	"regexp"
)

var matchLine = regexp.MustCompile(`(?m)^=== RUN\s*(TestIntegration/\S*)\s*$`)

// run the test passed in and grep out the test names
func findTests(packageToTest string) (tests []string) {
	cmd := exec.Command("go", "test", "-v", packageToTest)
	out, err := cmd.CombinedOutput()
	if err != nil {
		_, _ = os.Stderr.Write(out)
		log.Fatal(err)
	}
	results := matchLine.FindAllSubmatch(out, -1)
	if results == nil {
		log.Fatal("No tests found")
	}
	for _, line := range results {
		tests = append(tests, string(line[1]))
	}
	return tests
}

// run the test passed in with the -run passed in
func runTest(packageToTest string, testName string) {
	cmd := exec.Command("go", "test", "-v", packageToTest, "-run", "^"+testName+"$")
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("%s FAILED ------------------", testName)
		_, _ = os.Stderr.Write(out)
		log.Printf("%s FAILED ------------------", testName)
	} else {
		log.Printf("%s OK", testName)
	}
}
func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		log.Fatalf("Syntax: %s <package_to_test>", os.Args[0])
	}
	packageToTest := args[0]
	testNames := findTests(packageToTest)
	// fmt.Printf("%s\n", testNames)
	for _, testName := range testNames {
		runTest(packageToTest, testName)
	}
}
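
A hedged usage note: the tool takes exactly one package path and re-runs each TestIntegration subtest it finds in isolation, e.g. (the package path here is illustrative):

	go run bin/test_independence.go github.com/ncw/rclone/fs/operations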
@@ -43,6 +43,7 @@ import (
	_ "github.com/ncw/rclone/cmd/purge"
	_ "github.com/ncw/rclone/cmd/rc"
	_ "github.com/ncw/rclone/cmd/rcat"
	_ "github.com/ncw/rclone/cmd/rcd"
	_ "github.com/ncw/rclone/cmd/reveal"
	_ "github.com/ncw/rclone/cmd/rmdir"
	_ "github.com/ncw/rclone/cmd/rmdirs"
19 cmd/cmd.go
@@ -29,8 +29,8 @@ import (
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fspath"
	fslog "github.com/ncw/rclone/fs/log"
	"github.com/ncw/rclone/fs/rc"
	"github.com/ncw/rclone/fs/rc/rcflags"
	"github.com/ncw/rclone/fs/rc/rcserver"
	"github.com/ncw/rclone/lib/atexit"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"

@@ -51,7 +51,7 @@ var (
	errorCommandNotFound    = errors.New("command not found")
	errorUncategorized      = errors.New("uncategorized error")
	errorNotEnoughArguments = errors.New("not enough arguments")
	errorTooManyArguents    = errors.New("too many arguments")
	errorTooManyArguments   = errors.New("too many arguments")
)

const (

@@ -294,14 +294,12 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
	if len(args) < MinArgs {
		_ = cmd.Usage()
		_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
		// os.Exit(1)
		_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
		resolveExitCode(errorNotEnoughArguments)
	} else if len(args) > MaxArgs {
		_ = cmd.Usage()
		_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
		// os.Exit(1)
		resolveExitCode(errorTooManyArguents)
		_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
		resolveExitCode(errorTooManyArguments)
	}
}

@@ -352,8 +350,11 @@ func initConfig() {
	// Write the args for debug purposes
	fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Start the remote control if configured
	rc.Start(&rcflags.Opt)
	// Start the remote control server if configured
	_, err = rcserver.Start(&rcflags.Opt)
	if err != nil {
		log.Fatalf("Failed to start remote control: %v", err)
	}

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
@@ -53,7 +53,6 @@ func mountOptions(device string, mountpoint string) (options []string) {

	// OSX options
	if runtime.GOOS == "darwin" {
		options = append(options, "-o", "volname="+mountlib.VolumeName)
		if mountlib.NoAppleDouble {
			options = append(options, "-o", "noappledouble")
		}

@@ -70,6 +69,11 @@ func mountOptions(device string, mountpoint string) (options []string) {
		options = append(options, "--FileSystemName=rclone")
	}

	if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
		if mountlib.VolumeName != "" {
			options = append(options, "-o", "volname="+mountlib.VolumeName)
		}
	}
	if mountlib.AllowNonEmpty {
		options = append(options, "-o", "nonempty")
	}
@@ -1,8 +1,11 @@
package config

import (
	"errors"

	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/rc"
	"github.com/spf13/cobra"
)
@@ -90,10 +93,28 @@ For example to make a swift remote of name myremote using auto config
you would do:

    rclone config create myremote swift env_auth true

Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.

So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

    rclone config create mydrive drive config_is_local false
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 256, command, args)
		return config.CreateRemote(args[0], args[1], args[2:])
		in, err := argsToMap(args[2:])
		if err != nil {
			return err
		}
		err = config.CreateRemote(args[0], args[1], in)
		if err != nil {
			return err
		}
		config.ShowRemote(args[0])
		return nil
	},
}
@@ -107,10 +128,24 @@ in pairs of <key> <value>.

For example to update the env_auth field of a remote of name myremote you would do:

    rclone config update myremote swift env_auth true

If the remote uses oauth the token will be updated; if you don't
require this add an extra parameter thus:

    rclone config update myremote swift env_auth true config_refresh_token false
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 256, command, args)
		return config.UpdateRemote(args[0], args[1:])
		in, err := argsToMap(args[1:])
		if err != nil {
			return err
		}
		err = config.UpdateRemote(args[0], in)
		if err != nil {
			return err
		}
		config.ShowRemote(args[0])
		return nil
	},
}
@@ -136,6 +171,29 @@ For example to set password of a remote of name myremote you would do:
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 256, command, args)
		return config.PasswordRemote(args[0], args[1:])
		in, err := argsToMap(args[1:])
		if err != nil {
			return err
		}
		err = config.PasswordRemote(args[0], in)
		if err != nil {
			return err
		}
		config.ShowRemote(args[0])
		return nil
	},
}

// This takes a list of arguments in key value key value form and
// converts it into a map
func argsToMap(args []string) (out rc.Params, err error) {
	if len(args)%2 != 0 {
		return nil, errors.New("found key without value")
	}
	out = rc.Params{}
	// Set the config
	for i := 0; i < len(args); i += 2 {
		out[args[i]] = args[i+1]
	}
	return out, nil
}
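
A quick illustration of the conversion argsToMap performs (the key/value strings are hypothetical):

	in, err := argsToMap([]string{"env_auth", "true", "user", "alice"})
	// err == nil; in == rc.Params{"env_auth": "true", "user": "alice"}
	// An odd number of arguments (a key without a value) instead returns
	// the "found key without value" error.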
@@ -50,6 +50,19 @@ If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.

See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when copying a small number of files into a large destination
can speed transfers up greatly.

For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have
changed recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
@@ -40,6 +40,8 @@ This will:
This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
@@ -1,9 +1,6 @@
package copyurl

import (
	"net/http"
	"time"

	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/fs/operations"
	"github.com/spf13/cobra"

@@ -25,14 +22,7 @@ without saving it in tmp storage.
	fsdst, dstFileName := cmd.NewFsDstFile(args[1:])

	cmd.Run(true, true, command, func() error {
		resp, err := http.Get(args[0])
		if err != nil {
			return err
		}

		_, err = operations.RcatSize(fsdst, dstFileName, resp.Body, resp.ContentLength, time.Now())

		_, err := operations.CopyURL(fsdst, dstFileName, args[0])
		return err
	})
},
@@ -14,9 +14,13 @@ var commandDefintion = &cobra.Command{
	Use:   "delete remote:path",
	Short: `Remove the contents of path.`,
	Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
Remove the files in path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.

` + "`" + `rclone delete` + "`" + ` only deletes objects but leaves the directory structure
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `

Eg delete all files bigger than 100MBytes

Check what would be deleted first (use either)
@@ -138,6 +138,7 @@ func (r *results) checkChar(c rune) {
	escape := false
	if err != nil {
		fs.Infof(r.f, "Couldn't write file 0x%02X", c)
		escape = true
	} else {
		fs.Infof(r.f, "OK writing file 0x%02X", c)
	}
@@ -3,62 +3,26 @@ package lsjson

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path"
	"time"

	"github.com/ncw/rclone/backend/crypt"
	"github.com/ncw/rclone/cmd"
	"github.com/ncw/rclone/cmd/ls/lshelp"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/operations"
	"github.com/ncw/rclone/fs/walk"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

var (
	recurse       bool
	showHash      bool
	showEncrypted bool
	showOrigIDs   bool
	noModTime     bool
	opt operations.ListJSONOpt
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
	commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
	commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
	commandDefintion.Flags().BoolVarP(&showEncrypted, "encrypted", "M", false, "Show the encrypted names.")
	commandDefintion.Flags().BoolVarP(&showOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
}

// lsJSON is the struct which gets marshalled for each line
type lsJSON struct {
	Path      string
	Name      string
	Encrypted string `json:",omitempty"`
	Size      int64
	MimeType  string `json:",omitempty"`
	ModTime   Timestamp //`json:",omitempty"`
	IsDir     bool
	Hashes    map[string]string `json:",omitempty"`
	ID        string            `json:",omitempty"`
	OrigID    string            `json:",omitempty"`
}

// Timestamp a time in RFC3339 format with Nanosecond precision seconds
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
	tt := time.Time(t)
	if tt.IsZero() {
		return []byte(`""`), nil
	}
	return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
	commandDefintion.Flags().BoolVarP(&opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
	commandDefintion.Flags().BoolVarP(&opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
	commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
	commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
	commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
}
var commandDefintion = &cobra.Command{

@@ -104,107 +68,27 @@ can be processed line by line as each item is written one to a line.
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		var cipher crypt.Cipher
		if showEncrypted {
			fsInfo, _, _, config, err := fs.ConfigFs(args[0])
			if err != nil {
				log.Fatalf(err.Error())
			}
			if fsInfo.Name != "crypt" {
				log.Fatalf("The remote needs to be of type \"crypt\"")
			}
			cipher, err = crypt.NewCipher(config)
			if err != nil {
				log.Fatalf(err.Error())
			}
		}
		cmd.Run(false, false, command, func() error {
			fmt.Println("[")
			first := true
			err := walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
			err := operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
				out, err := json.Marshal(item)
				if err != nil {
					fs.CountError(err)
					fs.Errorf(dirPath, "error listing: %v", err)
					return nil
					return errors.Wrap(err, "failed to marshal list object")
				}
				for _, entry := range entries {
					item := lsJSON{
						Path:     entry.Remote(),
						Name:     path.Base(entry.Remote()),
						Size:     entry.Size(),
						MimeType: fs.MimeTypeDirEntry(entry),
					}
					if !noModTime {
						item.ModTime = Timestamp(entry.ModTime())
					}
					if cipher != nil {
						switch entry.(type) {
						case fs.Directory:
							item.Encrypted = cipher.EncryptDirName(path.Base(entry.Remote()))
						case fs.Object:
							item.Encrypted = cipher.EncryptFileName(path.Base(entry.Remote()))
						default:
							fs.Errorf(nil, "Unknown type %T in listing", entry)
						}
					}
					if do, ok := entry.(fs.IDer); ok {
						item.ID = do.ID()
					}
					if showOrigIDs {
						cur := entry
						for {
							u, ok := cur.(fs.ObjectUnWrapper)
							if !ok {
								break // not a wrapped object, use current id
							}
							next := u.UnWrap()
							if next == nil {
								break // no base object found, use current id
							}
							cur = next
						}
						if do, ok := cur.(fs.IDer); ok {
							item.OrigID = do.ID()
						}
					}
					switch x := entry.(type) {
					case fs.Directory:
						item.IsDir = true
					case fs.Object:
						item.IsDir = false
						if showHash {
							item.Hashes = make(map[string]string)
							for _, hashType := range x.Fs().Hashes().Array() {
								hash, err := x.Hash(hashType)
								if err != nil {
									fs.Errorf(x, "Failed to read hash: %v", err)
								} else if hash != "" {
									item.Hashes[hashType.String()] = hash
								}
							}
						}
					default:
						fs.Errorf(nil, "Unknown type %T in listing", entry)
					}
					out, err := json.Marshal(item)
					if err != nil {
						return errors.Wrap(err, "failed to marshal list object")
					}
					if first {
						first = false
					} else {
						fmt.Print(",\n")
					}
					_, err = os.Stdout.Write(out)
					if err != nil {
						return errors.Wrap(err, "failed to write to output")
					}

				if first {
					first = false
				} else {
					fmt.Print(",\n")
				}
				_, err = os.Stdout.Write(out)
				if err != nil {
					return errors.Wrap(err, "failed to write to output")
				}
				return nil
			})
			if err != nil {
				return errors.Wrap(err, "error listing JSON")
				return err
			}
			if !first {
				fmt.Println()
Some files were not shown because too many files have changed in this diff