mirror of https://github.com/rclone/rclone.git synced 2026-01-22 12:23:15 +00:00

Compare commits


112 Commits

Author SHA1 Message Date
Nick Craig-Wood
a5863650af fs: Remove the charset from the Mimetype -FIXME EXPERIMENT 2019-01-08 17:22:23 +00:00
Denis Skovpen
2d2533a08a cmd/copyurl: fix checking of --dry-run 2019-01-08 11:28:05 +00:00
Nick Craig-Wood
733b072d4f azureblob: ignore directory markers in initial Fs creation too - fixes #2806
This is a follow-up to feea0532 including the initial Fs creation
where the backend detects whether the path is pointing to a file or a
directory.
2019-01-08 11:21:20 +00:00
Nick Craig-Wood
2d01a65e36 oauthutil: read a fresh token config file before using the refresh token.
This means that rclone will pick up tokens from concurrently running
rclones.  This helps for Box which only allows each refresh token to
be used once.

Without this fix, rclone caches the refresh token at the start of the
run, then when the token expires the refresh token may have been used
already by a concurrently running rclone.

This also will retry the oauth up to 5 times at 1 second intervals.

See: https://forum.rclone.org/t/box-token-refresh-timing/8175
2019-01-08 11:01:30 +00:00
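The pattern described above, as a minimal standalone sketch (the Token type and function names here are hypothetical stand-ins, not rclone's actual oauthutil API): re-read the saved token immediately before each refresh attempt so a token written by a concurrently running process is used, retrying up to 5 times at 1 second intervals.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Token is a hypothetical stand-in for an OAuth token.
type Token struct{ RefreshToken string }

// freshToken re-reads the saved token before every refresh attempt so
// a token written by a concurrently running process is picked up,
// retrying up to 5 times at 1 second intervals as described above.
// readSaved and refresh are supplied by the caller.
func freshToken(readSaved func() (*Token, error), refresh func(*Token) (*Token, error)) (*Token, error) {
	var lastErr error
	for attempt := 0; attempt < 5; attempt++ {
		if attempt > 0 {
			time.Sleep(time.Second)
		}
		saved, err := readSaved() // fresh read, not a copy cached at startup
		if err != nil {
			lastErr = err
			continue
		}
		tok, err := refresh(saved)
		if err != nil {
			lastErr = err
			continue
		}
		return tok, nil
	}
	return nil, fmt.Errorf("token refresh failed after 5 attempts: %v", lastErr)
}

func main() {
	_, err := freshToken(
		func() (*Token, error) { return &Token{RefreshToken: "r1"}, nil },
		func(t *Token) (*Token, error) { return nil, errors.New("refresh token already used") },
	)
	fmt.Println(err)
}
```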
Nick Craig-Wood
b8280521a5 drive: supply correct scopes to when using --drive-impersonate
This fixes using --drive-impersonate and appfolders.
2019-01-07 11:50:05 +00:00
Nick Craig-Wood
60e6af2605 Add andrea rota to contributors 2019-01-05 20:56:03 +00:00
andrea rota
9d16822c63 note on minimum version with support for b2 multi application keys
This trivial patch adds a note about the minimum version of rclone
needed in order to be able to use multiple application keys with the b2
backend.

As Debian stable (amongst other distros) is shipping an older version,
users running rclone < 1.43 and reading about this feature in the online
docs may struggle to realise why they are not able to sync to b2 when
configured to use an application key other than the master one.

For reference: https://github.com/ncw/rclone/issues/2513
2019-01-04 11:22:20 +00:00
Cnly
38a0946071 docs: update OneDrive limitations and versioning issue 2019-01-04 16:42:19 +08:00
Nick Craig-Wood
95e52e1ac3 cmd: improve error reporting for too many/few arguments - fixes #2860
Improve docs on the different kind of flag passing.
2018-12-29 17:40:21 +00:00
Nick Craig-Wood
51ab1c940a Add Sebastian Bünger - @buengese to the MAINTAINERS list
Also add areas of specific responsibility
2018-12-29 16:08:40 +00:00
Nick Craig-Wood
6f30427357 yandex: note --timeout increase needed for large files
See: https://forum.rclone.org/t/rclone-stucks-at-the-end-of-a-big-file-upload/8102
2018-12-29 16:08:40 +00:00
Cnly
3220acc729 fstests: fix TestFsName failure when using remote:with/path 2018-12-29 09:34:04 +00:00
Nick Craig-Wood
3c97933416 oauthutil: suppress ERROR message when doing remote config
Before this change doing a remote config using rclone authorize gave
this error.  The token is saved a bit later anyway so the error is
needlessly confusing.

    ERROR : Failed to save new token in config file: section 'remote' not found.

This commit suppresses that error.

https://forum.rclone.org/t/onedrive-for-business-failed-to-save-token/8061
2018-12-28 09:53:53 +00:00
Nick Craig-Wood
039e2a9649 vendor: pull in github.com/ncw/swift latest to fix reauth on big files 2018-12-28 09:23:57 +00:00
Nick Craig-Wood
1c01d0b84a vendor: update dropbox SDK to fix failing integration tests #2829 2018-12-26 15:17:03 +00:00
Nick Craig-Wood
39eac7a765 Add Jay to contributors 2018-12-26 15:08:09 +00:00
Jay
082a7065b1 Use vfsgen for static HTML templates 2018-12-26 15:07:21 +00:00
Jay
f7b08a6982 vendor: add github.com/shurcooL/vfsgen 2018-12-26 15:07:21 +00:00
Nick Craig-Wood
37e32d8c80 Add Arkadius Stefanski to contributors 2018-12-26 15:03:27 +00:00
Arkadius Stefanski
f2a1b991de readme: fix copying link 2018-12-26 15:03:08 +00:00
Nick Craig-Wood
4128e696d6 Add François Leurent to contributors
New email for Animosity022
2018-12-26 15:00:16 +00:00
François Leurent
7e7f3de355 qingcloud: fix typos in trace messages 2018-12-26 14:51:48 +00:00
Nick Craig-Wood
1f6a1cd26d vfs: add test_vfs code for hunting for deadlocks 2018-12-26 09:08:27 +00:00
Nick Craig-Wood
2cfe2354df vfs: fix deadlock between RWFileHandle.close and File.Remove - fixes #2857
Before this change we took the locks file.mu and file.muRW in an
inconsistent order - after the change we always take them in the same
order to fix the deadlock.
2018-12-26 09:08:27 +00:00
Nick Craig-Wood
13387c0838 vfs: fix deadlock on concurrent operations on a directory - fixes #2811
Before this fix there were two paths where concurrent use of a
directory could take the file lock then directory lock and the other
would take the locks in the reverse order.

Fix this by narrowing the locking windows so the file lock and
directory lock don't overlap.
2018-12-26 09:08:27 +00:00
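Both deadlock fixes come down to the same discipline: a fixed global order for acquiring the two mutexes. A toy sketch of that idea (the struct and methods are illustrative, not the actual vfs types):

```go
package main

import "sync"

// file stands in for a type guarded by two mutexes, like the vfs
// File with its mu and muRW.
type file struct {
	mu   sync.Mutex // always taken first
	muRW sync.Mutex // always taken second
}

// close and remove both take mu before muRW. With a fixed order, two
// goroutines can never each hold one lock while waiting for the other.
func (f *file) close() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.muRW.Lock()
	defer f.muRW.Unlock()
	// ... flush data and mark the handle closed ...
}

func (f *file) remove() {
	f.mu.Lock() // same order as close: mu, then muRW
	defer f.mu.Unlock()
	f.muRW.Lock()
	defer f.muRW.Unlock()
	// ... detach the file from its directory ...
}

func main() {
	f := &file{}
	f.close()
	f.remove()
}
```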
Animosity022
5babf2dc5c Update drive.md
Fixed the Google Drive API documentation
2018-12-22 18:37:00 +00:00
Nick Craig-Wood
9012d7c6c1 cmd: fix --progress crash under Windows Jenkins - fixes #2846 2018-12-22 18:05:13 +00:00
Nick Craig-Wood
df1faa9a8f webdav: fail soft on time parsing errors
The time format provided by webdav servers seems to vary wildly from
that specified in the RFC - rclone already parses times in 5 different
formats!

If an unparseable time is found, then fail softly logging an ERROR
(just once) but returning the epoch.

This will mean that webdav servers with bad time formats will still be
usable by rclone.
2018-12-20 12:10:15 +00:00
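A minimal sketch of the fail-soft parsing described above (the format list is illustrative; it is not the exact set of layouts rclone tries): attempt each known layout, and if none matches, log an ERROR once and return the epoch rather than failing the listing.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// timeFormats is an illustrative list of layouts to try in turn.
var timeFormats = []string{
	time.RFC1123, time.RFC1123Z, time.RFC3339,
	"Mon, 2 Jan 2006 15:04:05 MST", "2006-01-02T15:04:05Z0700",
}

var warnOnce sync.Once

// parseTimeSoft tries each known layout and, if none matches, logs an
// ERROR just once and returns the Unix epoch instead of erroring out.
func parseTimeSoft(s string) time.Time {
	for _, layout := range timeFormats {
		if t, err := time.Parse(layout, s); err == nil {
			return t
		}
	}
	warnOnce.Do(func() {
		fmt.Printf("ERROR : time %q could not be parsed, using epoch\n", s)
	})
	return time.Unix(0, 0)
}

func main() {
	fmt.Println(parseTimeSoft("Wed, 21 Oct 2015 07:28:00 GMT"))
	fmt.Println(parseTimeSoft("not a time"))
}
```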
Nick Craig-Wood
3de7ad5223 b2: for a bucket limited application key check the bucket name
Before this fix rclone would just use the authorised bucket regardless
of what bucket you put on the command line.

This uses the new `bucketName` response in the API and checks that the
user is using the correct bucket name to avoid accidents.

Fixes #2839
2018-12-20 12:07:35 +00:00
Garry McNulty
9cb3a68c38 crypt: check for maximum length before decrypting filename
The EME Transform() method will panic if the input data is larger than
2048 bytes.

Fixes #2826
2018-12-19 11:51:44 +00:00
Nick Craig-Wood
c1dd76788d httplib: make http serving with auth generate INFO messages on auth fail
2018/12/13 12:13:44 INFO  : /: 127.0.0.1:39696: Basic auth challenge sent
2018/12/13 12:13:54 INFO  : /: 127.0.0.1:40050: Unauthorized request from ncw

Fixes #2834
2018-12-14 13:38:49 +00:00
Nick Craig-Wood
5ee1816a71 filter: parallelise reading of --files-from - fixes #2835
Before this change rclone would read the list of files from the
files-from parameter and check they existed one at a time.  This could
take a very long time for lots of files.

After this change, rclone will check up to --checkers in parallel.
2018-12-13 13:22:30 +00:00
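A sketch of the bounded-concurrency check, with a plain local stat standing in for the per-backend existence check; rclone bounds the parallelism with the --checkers value.

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// checkFiles verifies each path exists, running up to `checkers`
// checks in parallel instead of one at a time.
func checkFiles(paths []string, checkers int) []error {
	sem := make(chan struct{}, checkers) // bounds in-flight checks
	errs := make([]error, len(paths))
	var wg sync.WaitGroup
	for i, p := range paths {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func(i int, p string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			if _, err := os.Stat(p); err != nil {
				errs[i] = err
			}
		}(i, p)
	}
	wg.Wait()
	return errs
}

func main() {
	for _, err := range checkFiles([]string{"/etc/hosts", "/no/such/file"}, 8) {
		fmt.Println(err)
	}
}
```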
Nick Craig-Wood
63b51c6742 vendor: add golang.org/x/sync as a dependency 2018-12-13 10:45:52 +00:00
Nick Craig-Wood
e7684b7ed5 Add William Cocker to contributors 2018-12-06 21:53:53 +00:00
William Cocker
dda23baf42 s3: update doc for Glacier storage class
Related to #923
2018-12-06 21:53:38 +00:00
William Cocker
8575abf599 s3: add GLACIER storage class
Fixes #923
2018-12-06 21:53:05 +00:00
Nick Craig-Wood
feea0532cd azureblob: ignore directory markers - fixes #2806
This ignores 0 length blobs if
- they end with /
- they have the metadata hdi_isfolder = true
2018-12-06 21:47:03 +00:00
Nick Craig-Wood
d3e8ae1820 Add Mark Otway to contributors 2018-12-06 15:13:03 +00:00
Nick Craig-Wood
91a9a959a2 Add Mathieu Carbou to contributors 2018-12-06 15:12:58 +00:00
Mark Otway
04eae51d11 Fix install for Synology
7z check doesn't work due to misplaced comma, so installation fails on Synology.
2018-12-06 15:12:21 +00:00
Mathieu Carbou
8fb707e16d Fixes #1788: Retry-After support for Dropbox backend 2018-12-05 22:03:30 +00:00
Mathieu Carbou
4138d5aa75 Issue #1788: Pointing to Dropbox's v5.0.0 tag 2018-12-05 22:03:30 +00:00
Nick Craig-Wood
fc654a4cec http: fix backend with --files-from and non-existent files
Before this fix the http backend was returning the wrong error code
when files were not found.  This was causing --files-from to error on
missing files instead of skipping them like it should.
2018-12-04 17:40:44 +00:00
Nick Craig-Wood
26b5f55cba Update after goimports change 2018-12-04 10:11:57 +00:00
Nick Craig-Wood
3f572e6bf2 webdav: fix infinite loop on failed directory creation - fixes #2714 2018-12-02 21:03:12 +00:00
Nick Craig-Wood
941ad6bc62 azureblob: use the s3 pacer for 0 delay - fixes #2799 2018-12-02 20:55:16 +00:00
Nick Craig-Wood
5d1d93e163 azureblob: use the rclone HTTP client - fixes #2654
This enables --dump headers and --timeout to work properly.
2018-12-02 20:55:16 +00:00
Nick Craig-Wood
35fba5bfdd Add Garry McNulty to contributors 2018-12-02 20:52:04 +00:00
Garry McNulty
887834da91 b2: cleanup unfinished large files
The `cleanup` command will delete unfinished large file uploads that
were started more than a day ago (to avoid deleting uploads that are
potentially still in progress).

Fixes #2617
2018-12-02 20:51:13 +00:00
Nick Craig-Wood
107293c80e copy,move: restore --no-traverse flag
The --no-traverse flag was not implemented when the new sync routines
(using the march package) were implemented.

This re-implements --no-traverse in march by trying to find a match
for each object with NewObject rather than from a directory listing.
2018-12-02 20:28:13 +00:00
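A toy illustration of the two strategies (the dest type and its methods are invented for this sketch): with traversal the destination is listed once and matched against, while with --no-traverse each source object is probed individually, which wins when copying a few files into a very large destination.

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// dest is a toy stand-in for an rclone destination backend.
type dest map[string]int64

// List is the traversal path: the whole destination is enumerated up
// front and matches are looked up in the returned map.
func (d dest) List() map[string]int64 { return d }

// NewObject is the --no-traverse path: one lookup per source object,
// avoiding a listing of a huge destination.
func (d dest) NewObject(name string) (int64, error) {
	if size, ok := d[name]; ok {
		return size, nil
	}
	return 0, errNotFound
}

func main() {
	d := dest{"a.txt": 3}
	// With --no-traverse each source object is probed individually.
	for _, src := range []string{"a.txt", "b.txt"} {
		if _, err := d.NewObject(src); err != nil {
			fmt.Println("copy", src) // missing at the destination
		} else {
			fmt.Println("skip", src) // already present
		}
	}
}
```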
Nick Craig-Wood
e3c4ebd59a march: factor calling parameters into a structure 2018-12-02 18:07:26 +00:00
Nick Craig-Wood
d99ffde7c0 s3: change --s3-upload-concurrency default to 4 to increase performance #2772
Increasing the --s3-upload-concurrency to 4 (from 2) gives an
additional 45% throughput at the cost of 10MB extra memory per transfer.

After testing the upload performance.
2018-12-02 17:58:34 +00:00
Nick Craig-Wood
198c34ce21 s3: implement --s3-upload-cutoff for single part uploads below this - fixes #2772
Before this change rclone would use multipart uploads for any size of
file.  However multipart uploads are less efficient for smaller files
and don't have MD5 checksums so it is advantageous to use single part
uploads if possible.

This implements single part uploads for all files smaller than the
upload_cutoff size.  Streamed files must be uploaded as multipart
files though.
2018-12-02 17:58:34 +00:00
Nick Craig-Wood
0eba88bbfe sftp: check directory is empty before issuing rmdir
Some SFTP servers allow rmdir on non-empty directories, which is
permitted by the RFC, so make sure we don't accidentally delete data
here.

See: https://forum.rclone.org/t/rmdir-and-delete-empty-src-dirs-file-does-not-exist/7737
2018-12-02 11:16:30 +00:00
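The guard described above, sketched with plain os calls standing in for the SFTP client: list the directory first and refuse to remove it unless it is empty.

```go
package main

import (
	"fmt"
	"os"
)

// rmdirSafe refuses to remove a directory that still has entries,
// guarding against servers that would remove a non-empty directory.
func rmdirSafe(dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	if len(entries) > 0 {
		return fmt.Errorf("directory %q not empty", dir)
	}
	return os.Remove(dir)
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	fmt.Println(rmdirSafe(dir)) // <nil>: empty, so it is removed
}
```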
Nick Craig-Wood
aeea4430d5 swift: efficiency: slim Object and reduce requests on upload
- Slim down Object to only include necessary data
- Don't HEAD an object after PUT - read the hash from the response
2018-12-02 10:23:55 +00:00
Nick Craig-Wood
4b15c4215c sftp: fix rmdir on Windows based servers (eg CrushFTP)
Before this change we used Remove to remove directories.  This works
fine on Unix based systems but not so well on Windows based ones.
Swap to using RemoveDirectory instead.
2018-11-29 21:34:37 +00:00
Nick Craig-Wood
50452207d9 swift: add --swift-no-chunk to disable segmented uploads in rcat/mount
Fixes #2776
2018-11-29 11:11:30 +00:00
Nick Craig-Wood
01fcad9b9c rc: fix docs for sync/{sync,copy,move} and operations/{copy,move}file 2018-11-29 11:11:30 +00:00
themylogin
eb41253764 azureblob: allow building azureblob backend on *BSD
FreeBSD support was added in Azure/azure-storage-blob-go@0562badec5
OpenBSD and NetBSD support was added in Azure/azure-storage-blob-go@1d6dd77d74
2018-11-27 12:20:48 +00:00
Nick Craig-Wood
89625e54cf vendor: update dependencies to latest 2018-11-26 14:10:33 +00:00
Nick Craig-Wood
58f7141c96 drive, googlecloudstorage: disallow on go1.8 due to dependent library changes
golang.org/x/oauth2/google no longer builds on go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
e56c6402a7 serve restic: disallow on go1.8 because of dependent library changes
golang.org/x/net/http2 no longer builds on go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
d0eb8ddc30 serve webdav: disallow on go1.8 due to dependent library changes
golang.org/x/net/webdav no longer builds with go1.8
2018-11-26 14:10:33 +00:00
Nick Craig-Wood
a6c28a5faa Start v1.45-DEV development 2018-11-24 15:20:24 +00:00
Nick Craig-Wood
d35bd15762 Version v1.45 2018-11-24 13:44:25 +00:00
Nick Craig-Wood
8b8220c4f7 azureblob: wait for up to 60s to create a just deleted container
When a container is deleted, a container with the same name cannot be
created for at least 30 seconds; the container may not be available
for more than 30 seconds if the service is still processing the
request.

We sleep so that we wait at most 60 seconds.  This is mostly useful in
the integration tests where containers get deleted and remade
immediately.
2018-11-24 10:57:37 +00:00
Nick Craig-Wood
5fe3b0ad71 Add Stephen Harris to contributors 2018-11-24 10:57:37 +00:00
Stephen Harris
4c8c87a935 Update PROXY section of the FAQ 2018-11-23 20:14:36 +00:00
Nick Craig-Wood
bb10a51b39 test_all: limit to go1.11 so the template used is supported 2018-11-23 17:17:19 +00:00
Nick Craig-Wood
df01f7a4eb test_all: fix regexp for retrying nested tests 2018-11-23 17:17:19 +00:00
Nick Craig-Wood
e84790ef79 swift: add pacer for retries to make swift more reliable #2740 2018-11-22 22:15:52 +00:00
Nick Craig-Wood
369a8ee17b ncdu: fix deleting files 2018-11-22 21:41:17 +00:00
Nick Craig-Wood
84e21ade6b cmount: fix on Linux - only apply volname for Windows and macOS 2018-11-22 20:41:05 +00:00
Sebastian Bünger
703b0535a4 yandex: update docs 2018-11-22 20:14:50 +00:00
Sebastian Bünger
155264ae12 yandex: complete rewrite
Get rid of the api client and use rest/pacer for all API calls
Add Copy, Move, DirMove, PublicLink, About optional interfaces
Improve general error handling
Remove ListR for now due to inconsistent behaviour
fixes #2586, progress on #2740 and #2178
2018-11-22 20:14:50 +00:00
Nick Craig-Wood
31e2ce03c3 fstests: re-arrange backend integration tests so they can be retried
Before this change backend integration tests depended on each other,
so tests could not be retried.

After this change we nest tests to ensure that tests are provided with
the starting state they expect.

Tell the integration test runner that it can retry backend tests also.

This also includes bin/test_independence.go which runs each test
individually for a backend to prove that they are independent.
2018-11-22 20:12:12 +00:00
Nick Craig-Wood
e969505ae4 info: fix control character map output 2018-11-20 14:04:27 +00:00
Nick Craig-Wood
26e2f1a998 Add Alexander to contributors 2018-11-20 10:22:11 +00:00
Alexander
2682d5a9cf install with busybox if present 2018-11-20 10:22:00 +00:00
Nick Craig-Wood
2191592e80 Add Henry Ptasinski to contributors 2018-11-19 13:33:59 +00:00
Nick Craig-Wood
18f758294e Add Peter Kaminski to contributors 2018-11-19 13:33:59 +00:00
Henry Ptasinski
f95c1c61dd s3: add config info for Wasabi's US-West endpoint
Wasabi has two locations, US East and US West, with different endpoint URLs.
When configuring S3 to use Wasabi, provide the endpoint information for both
locations.
2018-11-19 13:33:42 +00:00
Nick Craig-Wood
8c8dcdd521 webdav: fix config parsing so --webdav-user and --webdav-pass flags work 2018-11-17 13:14:54 +00:00
Nick Craig-Wood
141c133818 fstest: Wait for longer if necessary in TestFsChangeNotify 2018-11-16 07:45:24 +00:00
Nick Craig-Wood
0f03e55cd1 fstests: ignore main directory creation in TestFsChangeNotify 2018-11-15 18:39:28 +00:00
Nick Craig-Wood
9e6ba92a11 fstests: attempt to fix TestFsChangeNotify flakiness
This now uses testPut to upload the test files which will retry on
errors properly.
2018-11-15 18:39:28 +00:00
Nick Craig-Wood
762561f88e fstest: factor out retry logic from put code and make testPut return the object too 2018-11-15 18:39:28 +00:00
Nick Craig-Wood
084fe38922 fstests: fixes the integration test errors running crypt over swift.
Skip tests involving errors creating or removing dirs on non-root
bucket-based fs
2018-11-15 18:39:28 +00:00
Peter Kaminski
63a2a935fc fix typos in original files, per #2727 review request 2018-11-14 22:48:58 +00:00
Peter Kaminski
64fce8438b docs: Fix a couple of minor typos in rclone_mount.md
* "transferring" instead of "transfering"
* "connection" instead of "connnection"
* "mount" instead of "mount mount"
2018-11-14 22:48:58 +00:00
Nick Craig-Wood
f92beb4e14 fstest: Fix TestPurge causing errors with subsequent tests on azure
Before this change TestPurge would remove a container and subsequent
tests would fail because the container was still being deleted so
couldn't be created.

This was fixed by introducing an fstest.NewRunIndividual() test runner
for TestPurge which causes the test to be run on a new container.
2018-11-14 17:14:02 +00:00
Nick Craig-Wood
f7ce2e8d95 azureblob: fix erroneous Rmdir error "directory not empty"
Before this change Rmdir would check the root rather than the
directory specified for being empty and return "directory not empty"
when it shouldn't have done.
2018-11-14 17:13:39 +00:00
Nick Craig-Wood
3975d82b3b Add brused27 to contributors 2018-11-13 17:00:26 +00:00
brused27
d87aa33ec5 azureblob: Avoid context deadline exceeded error by setting a large TryTimeout value - Fixes #2647 2018-11-13 16:59:53 +00:00
Anagh Kumar Baranwal
1b78f4d1ea Changed the docs scripts to use $HOME & $USER instead of specific values
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-11-13 11:00:34 +00:00
Nick Craig-Wood
b3704597f3 cmount: make --volname work for Windows - fixes #2679 2018-11-12 16:32:02 +00:00
Nick Craig-Wood
16f797a7d7 filter: add --ignore-case flag - fixes #502
The --ignore-case flag causes the filtering of file names to be case
insensitive.  The flag name comes from GNU tar.
2018-11-12 14:29:37 +00:00
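A simplified sketch of the flag's effect (rclone's filter package actually compiles patterns ahead of time, so this lower-both-sides approach is only an illustration):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// match applies a glob pattern to a name, optionally case
// insensitively by lowering both sides before matching.
func match(pattern, name string, ignoreCase bool) bool {
	if ignoreCase {
		pattern = strings.ToLower(pattern)
		name = strings.ToLower(name)
	}
	ok, _ := path.Match(pattern, name)
	return ok
}

func main() {
	fmt.Println(match("*.JPG", "photo.jpg", false)) // false
	fmt.Println(match("*.JPG", "photo.jpg", true))  // true
}
```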
Nick Craig-Wood
ee700ec01a lib/readers: add mutex to RepeatableReader - fixes #2572 2018-11-12 12:02:05 +00:00
Nick Craig-Wood
9b3c951ab7 Add Jake Coggiano to contributors 2018-11-12 11:34:28 +00:00
Jake Coggiano
22d17e79e3 dropbox: add dropbox impersonate support - fixes #2577 2018-11-12 11:33:39 +00:00
Jake Coggiano
6d3088a00b vendor: add github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/ 2018-11-12 11:33:39 +00:00
Nick Craig-Wood
84202c7471 onedrive: note 50,000 files is limit for one directory #2707 2018-11-11 15:22:19 +00:00
Nick Craig-Wood
96a05516f9 acd,box,onedrive,pcloud: remove log.Fatal from NewFs
And replace with error returns.
2018-11-11 11:00:14 +00:00
Nick Craig-Wood
4f6a942595 cmd: Make --progress update the stats right at the end
Before this when rclone exited the stats would just show the last
printed version, rather than the actual final state.
2018-11-11 09:57:37 +00:00
Nick Craig-Wood
c4b0a37b21 rc: improve docs on debugging 2018-11-10 10:18:13 +00:00
Nick Craig-Wood
9322f4baef Add Erik Swanson to contributors 2018-11-08 12:58:41 +00:00
Erik Swanson
fa0a1e7261 s3: fix role_arn, credential_source, ...
When the env_auth option is enabled, the AWS SDK's session constructor
now loads configuration from ~/.aws/config and environment variables,
and credentials per the selected (or default) AWS_PROFILE's settings.

This is accomplished by **NOT** including any Credential provider in the
aws.Config passed to the session constructor: If the Config.Credentials
is non-nil, that will always be used and the user's configuration
regarding role_arn, credential_source, source_profile, etc. from the
shared config will be completely ignored.

(The conditional creation and configuration of the stscreds Credential
provider is complicated enough that it is not worth re-creating that
logic.)
2018-11-08 12:58:23 +00:00
Nick Craig-Wood
4ad08794c9 fserrors: add "server closed idle connection" to retriable errors
This seems to be related to this go issue: https://github.com/golang/go/issues/19943

See: https://forum.rclone.org/t/copy-from-dropbox-to-google-drive-yields-failed-to-copy-failed-to-open-source-object-server-closed-idle-connection-error/7460
2018-11-08 11:12:25 +00:00
Nick Craig-Wood
c0f600764b Add Scott Edlund to contributors 2018-11-07 14:27:06 +00:00
Scott Edlund
f139e07380 enable softfloat on MIPS arch
MIPS does not have a floating point unit.  Enable softfloat to build binaries that run on devices that do not have MIPS_FPU enabled in their kernel.
2018-11-07 14:26:48 +00:00
Nick Craig-Wood
c6786eeb2d move: don't create directories with --dry-run - fixes #2676 2018-11-06 13:34:15 +00:00
Nick Craig-Wood
57b85b8155 rc: fix job tests on Windows 2018-11-06 13:03:48 +00:00
409 changed files with 24858 additions and 7681 deletions

View File

@@ -1,14 +1,17 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are
Current active maintainers of rclone are:
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
**This is a work in progress Draft**

File diff suppressed because it is too large

MANUAL.md (870 changes)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -116,7 +116,7 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
rclone gendocs docs/content/commands/
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py

View File

@@ -91,4 +91,4 @@ License
-------
This is free software under the terms of MIT the license (check the
[COPYING file](/rclone/COPYING) included in this package).
[COPYING file](/COPYING) included in this package).

View File

@@ -21,7 +21,7 @@ import (
"strings"
"time"
"github.com/ncw/go-acd"
acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
@@ -264,7 +264,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
if err != nil {
log.Fatalf("Failed to configure Amazon Drive: %v", err)
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)

View File

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8
package azureblob
@@ -22,12 +22,14 @@ import (
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
@@ -50,6 +52,7 @@ const (
defaultUploadCutoff = 256 * fs.MebiByte
maxUploadCutoff = 256 * fs.MebiByte
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
)
// Register with Fs
@@ -134,6 +137,7 @@ type Fs struct {
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on
@@ -271,6 +275,38 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// httpClientFactory creates a Factory object that sends HTTP requests
// to rclone's http.Client.
//
// copied from azblob.newDefaultHTTPClientFactory
func httpClientFactory(client *http.Client) pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
r, err := client.Do(request.WithContext(ctx))
if err != nil {
err = pipeline.NewError(err, "HTTP request failed")
}
return pipeline.NewHTTPResponse(r), err
}
})
}
// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
azblob.NewUniqueRequestIDPolicyFactory(),
azblob.NewRetryPolicyFactory(o.Retry),
c,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
azblob.NewRequestLogPolicyFactory(o.RequestLog),
}
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -306,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
var (
u *url.URL
serviceURL azblob.ServiceURL
@@ -322,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":
@@ -331,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to parse SAS URL")
}
// use anonymous credentials in case of sas url
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
@@ -348,24 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
svcURL: &serviceURL,
cntURL: &containerURL,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file
@@ -379,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = oldRoot
return f, nil
}
@@ -436,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}
// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
// Directory markers are 0 length
if size == 0 {
// Note that metadata with hdi_isfolder = true seems to be a
// de facto standard for marking blobs as directories.
endsWithSlash := strings.HasSuffix(remote, "/")
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
return true
}
}
return false
}
// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
@@ -471,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
MaxResults: int32(maxResults),
}
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {
@@ -500,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote := file.Name[len(f.root):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
}
// Send object
err = fn(remote, file, isDirectory)
err = fn(remote, file, false)
if err != nil {
return err
}
@@ -519,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
if err != nil {
@@ -705,6 +773,11 @@ func (f *Fs) Mkdir(dir string) error {
f.containerOK = true
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.containerDeleted = true
return true, err
}
@@ -722,7 +795,7 @@ func (f *Fs) Mkdir(dir string) error {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(dir string) (err error) {
empty := true
err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -917,27 +990,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
// o.md5
// o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = info.ContentLength()
o.size = size
o.modTime = time.Time(info.LastModified())
o.accessTier = azblob.AccessTierType(info.AccessTier())
o.setMetadata(info.NewMetadata())
o.setMetadata(metadata)
return nil
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
o.mimeType = *info.Properties.ContentType
o.size = *info.Properties.ContentLength
o.size = size
o.modTime = info.Properties.LastModified
o.accessTier = info.Properties.AccessTier
o.setMetadata(info.Metadata)
o.setMetadata(metadata)
return nil
}

View File

@@ -1,4 +1,4 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8
package azureblob

View File

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8
package azureblob

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build freebsd netbsd openbsd plan9 solaris !go1.8
// +build plan9 solaris !go1.8
package azureblob

View File

@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`

View File

@@ -368,6 +368,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
@@ -980,6 +987,12 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -1003,6 +1016,9 @@ func (f *Fs) purge(oldOnly bool) error {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}

View File

@@ -252,7 +252,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure Box: %v", err)
return nil, errors.Wrap(err, "failed to configure Box")
}
f := &Fs{

View File

@@ -15,7 +15,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)

View File

@@ -8,7 +8,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)

View File

@@ -41,6 +41,7 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -284,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {

View File

@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},

View File

@@ -1,4 +1,7 @@
// Package drive interfaces with the Google Drive object storage system
// +build go1.9
package drive
// FIXME need to deal with some corner cases
@@ -122,6 +125,29 @@ var (
_linkTemplates map[string]*template.Template // available link types
)
// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
return scopes
}
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -136,18 +162,14 @@ func init() {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
// Fill in the scopes
if opt.Scope == "" {
opt.Scope = defaultScope
}
driveConfig.Scopes = nil
for _, scope := range strings.Split(opt.Scope, ",") {
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
// Set the root_folder_id if using drive.appfolder
if scope == "drive.appfolder" {
m.Set("root_folder_id", "appDataFolder")
}
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig)
if err != nil {
@@ -753,7 +775,8 @@ func newPacer() *pacer.Pacer {
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}

View File

@@ -1,3 +1,5 @@
// +build go1.9
package drive
import (
@@ -20,6 +22,31 @@ import (
"google.golang.org/api/drive/v3"
)
func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}
/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
@@ -243,10 +270,19 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("DocumentImport", f.InternalTestDocumentImport)
t.Run("DocumentUpdate", f.InternalTestDocumentUpdate)
t.Run("DocumentExport", f.InternalTestDocumentExport)
t.Run("DocumentLink", f.InternalTestDocumentLink)
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
f.InternalTestDocumentImport(t)
t.Run("DocumentUpdate", func(t *testing.T) {
f.InternalTestDocumentUpdate(t)
t.Run("DocumentExport", func(t *testing.T) {
f.InternalTestDocumentExport(t)
t.Run("DocumentLink", func(t *testing.T) {
f.InternalTestDocumentLink(t)
})
})
})
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,4 +1,7 @@
// Test Drive filesystem interface
// +build go1.9
package drive
import (

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive

View File

@@ -8,6 +8,8 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive
import (

View File

@@ -31,9 +31,11 @@ import (
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
@@ -131,13 +133,19 @@ slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
}
// Fs represents a remote dropbox server
@@ -149,6 +157,7 @@ type Fs struct {
srv files.Client // the connection to the dropbox server
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *pacer.Pacer // To pace the API calls
@@ -195,7 +204,16 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// FIXME there is probably a better way of doing this!
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
}
return true, err
}
// Keep old behaviour for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
@@ -262,6 +280,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
user.Tag = "email"
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)

View File

@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
// +build go1.9
package googlecloudstorage
/*

View File

@@ -1,4 +1,7 @@
// Test GoogleCloudStorage filesystem interface
// +build go1.9
package googlecloudstorage_test
import (

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage

View File

@@ -193,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
err := o.stat()
if err != nil {
return nil, errors.Wrap(err, "Stat failed")
return nil, err
}
return o, nil
}
@@ -416,6 +416,9 @@ func (o *Object) url() string {
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")

View File

@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {

View File

@@ -404,13 +404,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
if opt.DriveID == "" || opt.DriveType == "" {
log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err)
return nil, errors.Wrap(err, "failed to configure OneDrive")
}
f := &Fs{

View File

@@ -246,7 +246,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure Pcloud: %v", err)
return nil, errors.Wrap(err, "failed to configure Pcloud")
}
f := &Fs{

View File

@@ -69,7 +69,7 @@ func init() {
}},
}, {
Name: "connection_retries",
Help: "Number of connnection retries.",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}},

View File

@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Tried to upload a singile object to QingStor")
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
fs.Debugf(u, "Uploading as multi-part object to QingStor")
mu := multiUploader{uploader: u}
return mu.multiPartUpload(reader)
}
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
req := qs.InitiateMultipartUploadInput{
ContentType: &mu.cfg.mimeType,
}
fs.Debugf(mu, "Tried to initiate a multi-part upload")
fs.Debugf(mu, "Initiating a multi-part upload")
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
if err == nil {
mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
ContentLength: &c.size,
Body: c.buffer,
}
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
if err != nil {
return err
}
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
mu.mtx.Lock()
defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
req := qs.ListMultipartInput{
UploadID: mu.uploadID,
}
fs.Debugf(mu, "Tried to list a multi-part")
fs.Debugf(mu, "Reading multi-part details")
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
if err == nil {
mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
ObjectParts: mu.objectParts,
ETag: &md5String,
}
fs.Debugf(mu, "Tried to complete a multi-part")
fs.Debugf(mu, "Completing multi-part object")
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
if err == nil {
fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
req := qs.AbortMultipartUploadInput{
UploadID: uploadID,
}
fs.Debugf(mu, "Tried to abort a multi-part")
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}

View File

@@ -291,7 +291,11 @@ func init() {
Provider: "DigitalOcean",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi Object Storage",
Help: "Wasabi US East endpoint",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West endpoint",
Provider: "Wasabi",
}},
}, {
@@ -539,13 +543,24 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
}, {
Value: "ONEZONE_IA",
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier storage class",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
Any files larger than this will be uploaded in chunks of this
size. The default is 5MB. The minimum is 5MB.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
@@ -573,7 +588,7 @@ concurrently.
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 2,
Default: 4,
Advanced: true,
}, {
Name: "force_path_style",
@@ -603,14 +618,16 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// Options defines the configuration for this backend
@@ -626,6 +643,7 @@ type Options struct {
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -647,6 +665,7 @@ type Fs struct {
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *pacer.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
// Object describes a s3 object
@@ -804,8 +823,21 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
ses := session.New()
c := s3.New(ses, awsConfig)
awsSessionOpts := session.Options{
Config: *awsConfig,
}
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
return nil, nil, err
}
c := s3.New(ses)
if opt.V2Auth || opt.Region == "other-v2-signature" {
fs.Debugf(nil, "Using v2 auth")
signer := func(req *request.Request) {
@@ -837,6 +869,21 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -849,6 +896,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "s3: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "s3: upload cutoff")
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
@@ -865,6 +916,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
ses: ses,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -1539,38 +1591,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
modTime := src.ModTime()
size := src.Size()
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var uploader *s3manager.Uploader
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
}
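The shift arithmetic above rounds the computed part size up to the next whole MiB. A small worked sketch, assuming maxFileSize is S3's 5 TiB object size limit and that s3manager.MaxUploadParts is 10000:
package main
import "fmt"
const (
    maxUploadParts = 10000   // s3manager.MaxUploadParts
    maxFileSize    = 5 << 40 // assumption: S3's 5 TiB object size limit
)
// partSize rounds size/maxUploadParts up to the next whole MiB, exactly as
// the expression in the hunk above does.
func partSize(size int64) int64 {
    return (((size / maxUploadParts) >> 20) + 1) << 20
}
func main() {
    fmt.Println(partSize(maxFileSize) >> 20) // 525 MiB parts for unknown-size streams
    fmt.Println(partSize(100<<30) >> 20)     // 11 MiB parts for a 100 GiB file
}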
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
// read the md5sum if available for non multipart uploads and if
// disable checksum isn't present.
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
if err == nil {
metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if multipart {
metadata[metaMD5Hash] = &md5sum
}
}
}
}
@@ -1579,30 +1639,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)
key := o.fs.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return shouldRetry(err)
})
if err != nil {
return err
if multipart {
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return shouldRetry(err)
})
if err != nil {
return err
}
} else {
req := s3.PutObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
Metadata: metadata,
}
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
// Create the request
putObj, _ := o.fs.c.PutObjectRequest(&req)
// Sign it so we can upload using a presigned request.
//
// Note the SDK doesn't currently support streaming to
// PutObject so we'll use this work-around.
url, headers, err := putObj.PresignRequest(15 * time.Minute)
if err != nil {
return errors.Wrap(err, "s3 upload: sign request")
}
// Set the request body to nil if empty so as not to use chunked encoding
if size == 0 {
in = nil
}
// create the vanilla http request
httpReq, err := http.NewRequest("PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
// set the headers we signed and the length
httpReq.Header = headers
httpReq.ContentLength = size
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
return shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
}
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
})
if err != nil {
return err
}
}
// Read the metadata from the newly created object
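For reference, a condensed standalone sketch of the presign-and-PUT work-around above; the bucket, key and body are hypothetical and credentials come from the environment. PresignRequest signs the built-but-unsent PutObject request so a plain http.Client can stream the body, which the SDK's PutObject cannot do from a non-seekable reader:
package main
import (
    "log"
    "net/http"
    "strings"
    "time"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)
func main() {
    svc := s3.New(session.Must(session.NewSession()))
    // Build, but do not send, the PutObject request.
    putObj, _ := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        Key:    aws.String("example-key"),    // hypothetical
    })
    // Sign it so the upload can be done with a vanilla HTTP PUT.
    url, headers, err := putObj.PresignRequest(15 * time.Minute)
    if err != nil {
        log.Fatal(err)
    }
    body := strings.NewReader("hello")
    httpReq, err := http.NewRequest("PUT", url, body)
    if err != nil {
        log.Fatal(err)
    }
    httpReq.Header = headers
    httpReq.ContentLength = body.Size() // matches the signed Content-Length
    resp, err := http.DefaultClient.Do(httpReq)
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = resp.Body.Close() }()
    log.Println(resp.Status)
}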

View File

@@ -23,4 +23,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -28,7 +28,7 @@ import (
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/xanzy/ssh-agent"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)
@@ -594,12 +594,22 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
// Check to see if directory is empty as some servers will
// delete recursively with RemoveDirectory
entries, err := f.List(dir)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
// Remove the directory
root := path.Join(f.root, dir)
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Rmdir")
}
err = c.sftpClient.Remove(root)
err = c.sftpClient.RemoveDirectory(root)
f.putSftpConnection(&c, err)
return err
}
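This guard matters because, as the comment notes, some SFTP servers implement RemoveDirectory recursively: listing first and returning fs.ErrorDirectoryNotEmpty stops Rmdir from silently deleting a directory's contents, and the switch from Remove to RemoveDirectory uses the call actually meant for directories.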

View File

@@ -21,6 +21,7 @@ import (
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/swift"
"github.com/pkg/errors"
)
@@ -30,6 +31,7 @@ const (
directoryMarkerContentType = "application/directory" // content type of directory marker objects
listChunks = 1000 // chunk size to read directory listings
defaultChunkSize = 5 * fs.GibiByte
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and hubic
@@ -41,6 +43,20 @@ Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "no_chunk",
Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}}
// Register with Fs
@@ -173,6 +189,7 @@ type Options struct {
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
}
// Fs represents a remote swift server
@@ -187,16 +204,20 @@ type Fs struct {
containerOK bool // true if we have created the container
segmentsContainer string // container to store the segments (if any) in
noCheckContainer bool // don't check the container before creating it
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
info swift.Object // Info from the swift object if known
headers swift.Headers // The object headers if known
fs *Fs // what this object is part of
remote string // The remote path
size int64
lastModified time.Time
contentType string
md5 string
headers swift.Headers // The object headers if known
}
// ------------------------------------------------------------
@@ -227,6 +248,32 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
408, // Request Timeout
409, // Conflict - various states that could be resolved on a retry
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable/Slow Down - "Reduce your request rate"
504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
}
}
// Check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
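A runnable distillation of the classifier above; the fserrors.ShouldRetry fallback is stubbed out as a plain false here, and the error texts are made up:
package main
import (
    "fmt"
    "github.com/ncw/swift"
)
// retryErrorCodes mirrors the list in the hunk above.
var retryErrorCodes = []int{401, 408, 409, 429, 500, 503, 504}
func shouldRetry(err error) (bool, error) {
    // If this is a swift.Error object extract the HTTP error code
    if swiftError, ok := err.(*swift.Error); ok {
        for _, e := range retryErrorCodes {
            if swiftError.StatusCode == e {
                return true, err
            }
        }
    }
    return false, err // the real code falls back to fserrors.ShouldRetry(err)
}
func main() {
    retry, _ := shouldRetry(&swift.Error{StatusCode: 429, Text: "Rate exceeded"})
    fmt.Println(retry) // true: worth pacing and retrying
    retry, _ = shouldRetry(&swift.Error{StatusCode: 404, Text: "Not Found"})
    fmt.Println(retry) // false: a retry cannot help
}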
// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
@@ -337,6 +384,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -346,7 +394,11 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
if f.root != "" {
f.root += "/"
// Check to see if the object exists - ignoring directory markers
info, _, err := f.c.Object(container, directory)
var info swift.Object
err = f.pacer.Call(func() (bool, error) {
info, _, err = f.c.Object(container, directory)
return shouldRetry(err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
f.root = path.Dir(directory)
if f.root == "." {
@@ -398,7 +450,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
}
if info != nil {
// Set info but not headers
o.info = *info
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
@@ -436,7 +491,12 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
}
rootLength := len(root)
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
objects, err := f.c.Objects(container, opts)
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(container, opts)
return shouldRetry(err)
})
if err == nil {
for i := range objects {
object := &objects[i]
@@ -525,7 +585,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
containers, err := f.c.ContainersAll(nil)
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
@@ -586,7 +650,12 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
containers, err := f.c.ContainersAll(nil)
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
}
@@ -636,14 +705,20 @@ func (f *Fs) Mkdir(dir string) error {
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
_, _, err = f.c.Container(f.container)
err = f.pacer.Call(func() (bool, error) {
_, _, err = f.c.Container(f.container)
return shouldRetry(err)
})
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.c.ContainerCreate(f.container, headers)
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(f.container, headers)
return shouldRetry(err)
})
}
if err == nil {
f.containerOK = true
@@ -660,7 +735,11 @@ func (f *Fs) Rmdir(dir string) error {
if f.root != "" || dir != "" {
return nil
}
err := f.c.ContainerDelete(f.container)
var err error
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerDelete(f.container)
return shouldRetry(err)
})
if err == nil {
f.containerOK = false
}
@@ -719,7 +798,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -768,7 +850,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
return strings.ToLower(o.md5), nil
}
// hasHeader checks for the header passed in returning false if the
@@ -797,7 +879,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.info.Bytes
return o.size
}
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
o.md5 = info.Hash
o.contentType = info.ContentType
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
@@ -809,15 +906,23 @@ func (o *Object) readMetaData() (err error) {
if o.headers != nil {
return nil
}
info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
var info swift.Object
var h swift.Headers
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
return shouldRetry(err)
})
if err != nil {
if err == swift.ObjectNotFound {
return fs.ErrorObjectNotFound
}
return err
}
o.info = info
o.headers = h
err = o.decodeMetaData(&info)
if err != nil {
return err
}
return nil
}
@@ -828,17 +933,17 @@ func (o *Object) readMetaData() (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
if fs.Config.UseServerModTime {
return o.info.LastModified
return o.lastModified
}
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.info.LastModified
return o.lastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.info.LastModified
return o.lastModified
}
return modTime
}
@@ -861,7 +966,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
newHeaders[k] = v
}
}
return o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
return shouldRetry(err)
})
}
// Storable returns if this object is storable
@@ -869,14 +977,17 @@ func (o *Object) SetModTime(modTime time.Time) error {
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.info.ContentType != directoryMarkerContentType
return o.contentType != directoryMarkerContentType
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"]
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
err = o.fs.pacer.Call(func() (bool, error) {
in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
return shouldRetry(err)
})
return
}
@@ -903,13 +1014,20 @@ func (o *Object) removeSegments(except string) error {
}
segmentPath := segmentsRoot + remote
fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
var err error
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
return shouldRetry(err)
})
})
if err != nil {
return err
}
// remove the segments container if empty, ignore errors
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
}
@@ -938,13 +1056,19 @@ func urlEncode(str string) string {
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
// Create the segmentsContainer if it doesn't exist
var err error
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
err = o.fs.pacer.Call(func() (bool, error) {
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
return shouldRetry(err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if o.fs.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
return shouldRetry(err)
})
}
if err != nil {
return "", err
@@ -973,7 +1097,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
return shouldRetry(err)
})
if err != nil {
return "", err
}
@@ -984,7 +1111,10 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
return shouldRetry(err)
})
return uniquePrefix + "/", err
}
@@ -1014,17 +1144,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
contentType := fs.MimeType(src)
headers := m.ObjectHeaders()
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
if err != nil {
return err
}
o.headers = nil // wipe old metadata
} else {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
return shouldRetry(err)
})
if err != nil {
return err
}
// set Metadata since ObjectPut checked the hash and length so we know the
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
}
// If file was a dynamic large object then remove old/all segments
@@ -1035,8 +1179,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
}
// Read the metadata from the newly created object
o.headers = nil // wipe old metadata
// Read the metadata from the newly created object if necessary
return o.readMetaData()
}
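The condition gating chunked uploads above can be summarised in a tiny sketch, with chunkSize fixed at the backend's 5 GiB default:
package main
import "fmt"
func main() {
    const chunkSize int64 = 5 << 30 // defaultChunkSize above
    useChunks := func(size int64, noChunk bool) bool {
        return size > chunkSize || (size == -1 && !noChunk)
    }
    fmt.Println(useChunks(6<<30, false)) // true: larger than chunk_size
    fmt.Println(useChunks(-1, false))    // true: streaming upload, chunking allowed
    fmt.Println(useChunks(-1, true))     // false: no_chunk caps the stream at 5 GiB
}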
@@ -1047,7 +1190,10 @@ func (o *Object) Remove() error {
return err
}
// Remove file/manifest first
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -1063,7 +1209,7 @@ func (o *Object) Remove() error {
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.info.ContentType
return o.contentType
}
// Check the interfaces are satisfied

View File

@@ -6,7 +6,10 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
)
const (
@@ -148,6 +151,8 @@ var timeFormats = []string{
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
}
var oneTimeError sync.Once
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
@@ -171,5 +176,14 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
break
}
}
if err != nil {
oneTimeError.Do(func() {
fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
})
// Return the epoch instead
*t = Time(time.Unix(0, 0))
// ignore error
err = nil
}
return err
}
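The sync.Once guard above means the complaint is logged once per process rather than once per unparseable XML element, which matters when a listing contains many objects with the same bad timestamp. A minimal sketch of the pattern:
package main
import (
    "fmt"
    "sync"
)
var oneTimeError sync.Once
func main() {
    for i := 0; i < 3; i++ {
        oneTimeError.Do(func() {
            fmt.Println("Failed to parse time - using the epoch")
        })
    }
    // the message is printed exactly once
}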

View File

@@ -31,7 +31,6 @@ import (
"github.com/ncw/rclone/backend/webdav/api"
"github.com/ncw/rclone/backend/webdav/odrvcookie"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
@@ -96,10 +95,11 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
URL string `config:"url"`
Vendor string `config:"vendor"`
User string `config:"user"`
Pass string `config:"pass"`
URL string `config:"url"`
Vendor string `config:"vendor"`
User string `config:"user"`
Pass string `config:"pass"`
BearerToken string `config:"bearer_token"`
}
// Fs represents a remote webdav
@@ -283,9 +283,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
bearerToken := config.FileGet(name, "bearer_token")
if !strings.HasSuffix(opt.URL, "/") {
opt.URL += "/"
}
@@ -320,10 +317,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
if user != "" || pass != "" {
if opt.User != "" || opt.Pass != "" {
f.srv.SetUserPass(opt.User, opt.Pass)
} else if bearerToken != "" {
f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
} else if opt.BearerToken != "" {
f.srv.SetHeader("Authorization", "BEARER "+opt.BearerToken)
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(opt.Vendor)
@@ -604,10 +601,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
return f.mkdir(parent)
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
// We assume the root is already ceated
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(dirPath string) error {
// We assume the root is already created
if dirPath == "" {
return nil
}
@@ -620,20 +616,26 @@ func (f *Fs) mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// already exists
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
return nil
}
// parent does not exists
// parent does not exist
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
err = f.mkdir(dirPath)
err = f._mkdir(dirPath)
}
}
}
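A self-contained sketch of the retry-on-conflict pattern that _mkdir and mkdir implement above, using a fake in-memory server; errConflict stands in for the 409 api.Error and the paths are hypothetical:
package main
import (
    "errors"
    "fmt"
    "path"
)
var errConflict = errors.New("409 Conflict: parent missing")
// existing is the fake server state; the root "." already exists.
var existing = map[string]bool{".": true}
// _mkdir makes one directory only, failing if its parent is absent.
func _mkdir(dir string) error {
    if !existing[path.Dir(dir)] {
        return errConflict
    }
    existing[dir] = true
    return nil
}
// mkdir creates the parents on conflict and then retries, approximating the
// mkdir/mkParentDir interplay in the hunk above.
func mkdir(dir string) error {
    err := _mkdir(dir)
    if err == errConflict {
        if err = mkdir(path.Dir(dir)); err == nil {
            err = _mkdir(dir)
        }
    }
    return err
}
func main() {
    fmt.Println(mkdir("a/b/c"), existing["a/b"]) // <nil> true
}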

View File

@@ -1,34 +0,0 @@
package src
//from yadisk
import (
"io"
"net/http"
)
//RootAddr is the base URL for Yandex Disk API.
const RootAddr = "https://cloud-api.yandex.com" //also https://cloud-api.yandex.net and https://cloud-api.yandex.ru
func (c *Client) setRequestScope(req *http.Request) {
req.Header.Add("Accept", "application/json")
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Authorization", "OAuth "+c.token)
}
func (c *Client) scopedRequest(method, urlPath string, body io.Reader) (*http.Request, error) {
fullURL := RootAddr
if urlPath[:1] != "/" {
fullURL += "/" + urlPath
} else {
fullURL += urlPath
}
req, err := http.NewRequest(method, fullURL, body)
if err != nil {
return req, err
}
c.setRequestScope(req)
return req, nil
}

View File

@@ -1,133 +0,0 @@
package src
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
)
//Client struct
type Client struct {
token string
basePath string
HTTPClient *http.Client
}
//NewClient creates new client
func NewClient(token string, client ...*http.Client) *Client {
return newClientInternal(
token,
"https://cloud-api.yandex.com/v1/disk", //also "https://cloud-api.yandex.net/v1/disk" "https://cloud-api.yandex.ru/v1/disk"
client...)
}
func newClientInternal(token string, basePath string, client ...*http.Client) *Client {
c := &Client{
token: token,
basePath: basePath,
}
if len(client) != 0 {
c.HTTPClient = client[0]
} else {
c.HTTPClient = http.DefaultClient
}
return c
}
//ErrorHandler type
type ErrorHandler func(*http.Response) error
var defaultErrorHandler ErrorHandler = func(resp *http.Response) error {
if resp.StatusCode/100 == 5 {
return errors.New("server error")
}
if resp.StatusCode/100 == 4 {
var response DiskClientError
contents, _ := ioutil.ReadAll(resp.Body)
err := json.Unmarshal(contents, &response)
if err != nil {
return err
}
return response
}
if resp.StatusCode/100 == 3 {
return errors.New("redirect error")
}
return nil
}
func (HTTPRequest *HTTPRequest) run(client *Client) ([]byte, error) {
var err error
values := make(url.Values)
for k, v := range HTTPRequest.Parameters {
values.Set(k, fmt.Sprintf("%v", v))
}
var req *http.Request
if HTTPRequest.Method == "POST" {
// TODO json serialize
req, err = http.NewRequest(
"POST",
client.basePath+HTTPRequest.Path,
strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
// TODO
// req.Header.Set("Content-Type", "application/json")
} else {
req, err = http.NewRequest(
HTTPRequest.Method,
client.basePath+HTTPRequest.Path+"?"+values.Encode(),
nil)
if err != nil {
return nil, err
}
}
for headerName := range HTTPRequest.Headers {
var headerValues = HTTPRequest.Headers[headerName]
for _, headerValue := range headerValues {
req.Header.Set(headerName, headerValue)
}
}
return runRequest(client, req)
}
func runRequest(client *Client, req *http.Request) ([]byte, error) {
return runRequestWithErrorHandler(client, req, defaultErrorHandler)
}
func runRequestWithErrorHandler(client *Client, req *http.Request, errorHandler ErrorHandler) (out []byte, err error) {
resp, err := client.HTTPClient.Do(req)
if err != nil {
return nil, err
}
defer CheckClose(resp.Body, &err)
return checkResponseForErrorsWithErrorHandler(resp, errorHandler)
}
func checkResponseForErrorsWithErrorHandler(resp *http.Response, errorHandler ErrorHandler) ([]byte, error) {
if resp.StatusCode/100 > 2 {
return nil, errorHandler(resp)
}
return ioutil.ReadAll(resp.Body)
}
// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}

View File

@@ -1,51 +0,0 @@
package src
import (
"bytes"
"encoding/json"
"io"
"net/url"
)
//CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
CustomProperties map[string]interface{} `json:"custom_properties"`
}
//SetCustomProperty will set specified data from Yandex Disk
func (c *Client) SetCustomProperty(remotePath string, property string, value string) error {
rcm := map[string]interface{}{
property: value,
}
cpr := CustomPropertyResponse{rcm}
data, _ := json.Marshal(cpr)
body := bytes.NewReader(data)
err := c.SetCustomPropertyRequest(remotePath, body)
if err != nil {
return err
}
return err
}
//SetCustomPropertyRequest will make an CustomProperty request and return a URL to CustomProperty data to.
func (c *Client) SetCustomPropertyRequest(remotePath string, body io.Reader) (err error) {
values := url.Values{}
values.Add("path", remotePath)
req, err := c.scopedRequest("PATCH", "/v1/disk/resources?"+values.Encode(), body)
if err != nil {
return err
}
resp, err := c.HTTPClient.Do(req)
if err != nil {
return err
}
if err := CheckAPIError(resp); err != nil {
return err
}
defer CheckClose(resp.Body, &err)
//If needed we can read response and check if custom_property is set.
return nil
}

View File

@@ -1,23 +0,0 @@
package src
import (
"net/url"
"strconv"
)
// Delete will remove specified file/folder from Yandex Disk
func (c *Client) Delete(remotePath string, permanently bool) error {
values := url.Values{}
values.Add("permanently", strconv.FormatBool(permanently))
values.Add("path", remotePath)
urlPath := "/v1/disk/resources?" + values.Encode()
fullURL := RootAddr
if urlPath[:1] != "/" {
fullURL += "/" + urlPath
} else {
fullURL += urlPath
}
return c.PerformDelete(fullURL)
}

View File

@@ -1,48 +0,0 @@
package src
import "encoding/json"
//DiskInfoRequest type
type DiskInfoRequest struct {
client *Client
HTTPRequest *HTTPRequest
}
func (req *DiskInfoRequest) request() *HTTPRequest {
return req.HTTPRequest
}
//DiskInfoResponse struct is returned by the API for DiskInfo request.
type DiskInfoResponse struct {
TrashSize uint64 `json:"TrashSize"`
TotalSpace uint64 `json:"TotalSpace"`
UsedSpace uint64 `json:"UsedSpace"`
SystemFolders map[string]string `json:"SystemFolders"`
}
//NewDiskInfoRequest create new DiskInfo Request
func (c *Client) NewDiskInfoRequest() *DiskInfoRequest {
return &DiskInfoRequest{
client: c,
HTTPRequest: createGetRequest(c, "/", nil),
}
}
//Exec run DiskInfo Request
func (req *DiskInfoRequest) Exec() (*DiskInfoResponse, error) {
data, err := req.request().run(req.client)
if err != nil {
return nil, err
}
var info DiskInfoResponse
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
if info.SystemFolders == nil {
info.SystemFolders = make(map[string]string)
}
return &info, nil
}

View File

@@ -1,66 +0,0 @@
package src
import (
"encoding/json"
"io"
"net/url"
)
// DownloadResponse struct is returned by the API for Download request.
type DownloadResponse struct {
HRef string `json:"href"`
Method string `json:"method"`
Templated bool `json:"templated"`
}
// Download will get specified data from Yandex.Disk supplying the extra headers
func (c *Client) Download(remotePath string, headers map[string]string) (io.ReadCloser, error) { //io.Writer
ur, err := c.DownloadRequest(remotePath)
if err != nil {
return nil, err
}
return c.PerformDownload(ur.HRef, headers)
}
// DownloadRequest will make an download request and return a URL to download data to.
func (c *Client) DownloadRequest(remotePath string) (ur *DownloadResponse, err error) {
values := url.Values{}
values.Add("path", remotePath)
req, err := c.scopedRequest("GET", "/v1/disk/resources/download?"+values.Encode(), nil)
if err != nil {
return nil, err
}
resp, err := c.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if err := CheckAPIError(resp); err != nil {
return nil, err
}
defer CheckClose(resp.Body, &err)
ur, err = ParseDownloadResponse(resp.Body)
if err != nil {
return nil, err
}
return ur, nil
}
// ParseDownloadResponse tries to read and parse DownloadResponse struct.
func ParseDownloadResponse(data io.Reader) (*DownloadResponse, error) {
dec := json.NewDecoder(data)
var ur DownloadResponse
if err := dec.Decode(&ur); err == io.EOF {
// ok
} else if err != nil {
return nil, err
}
// TODO: check if there is any trash data after JSON and crash if there is.
return &ur, nil
}

View File

@@ -1,9 +0,0 @@
package src
// EmptyTrash will permanently delete all trashed files/folders from Yandex Disk
func (c *Client) EmptyTrash() error {
fullURL := RootAddr
fullURL += "/v1/disk/trash/resources"
return c.PerformDelete(fullURL)
}

View File

@@ -1,84 +0,0 @@
package src
//from yadisk
import (
"encoding/json"
"fmt"
"io"
"net/http"
)
// ErrorResponse represents erroneous API response.
// Implements go's built in `error`.
type ErrorResponse struct {
ErrorName string `json:"error"`
Description string `json:"description"`
Message string `json:"message"`
StatusCode int `json:""`
}
func (e *ErrorResponse) Error() string {
return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}
// ProccessErrorResponse tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorResponse(data io.Reader) (*ErrorResponse, error) {
dec := json.NewDecoder(data)
var errorResponse ErrorResponse
if err := dec.Decode(&errorResponse); err == io.EOF {
// ok
} else if err != nil {
return nil, err
}
// TODO: check if there is any trash data after JSON and crash if there is.
return &errorResponse, nil
}
// CheckAPIError is a convenient function to turn erroneous
// API response into go error. It closes the Body on error.
func CheckAPIError(resp *http.Response) (err error) {
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
return nil
}
defer CheckClose(resp.Body, &err)
errorResponse, err := ProccessErrorResponse(resp.Body)
if err != nil {
return err
}
errorResponse.StatusCode = resp.StatusCode
return errorResponse
}
// ProccessErrorString tries to represent data passed as
// an ErrorResponse object.
func ProccessErrorString(data string) (*ErrorResponse, error) {
var errorResponse ErrorResponse
if err := json.Unmarshal([]byte(data), &errorResponse); err == nil {
// ok
} else if err != nil {
return nil, err
}
// TODO: check if there is any trash data after JSON and crash if there is.
return &errorResponse, nil
}
// ParseAPIError Parse json error response from API
func (c *Client) ParseAPIError(jsonErr string) (string, error) { //ErrorName
errorResponse, err := ProccessErrorString(jsonErr)
if err != nil {
return err.Error(), err
}
return errorResponse.ErrorName, nil
}

View File

@@ -1,14 +0,0 @@
package src
import "encoding/json"
//DiskClientError struct
type DiskClientError struct {
Description string `json:"Description"`
Code string `json:"Error"`
}
func (e DiskClientError) Error() string {
b, _ := json.Marshal(e)
return string(b)
}

View File

@@ -1,8 +0,0 @@
package src
// FilesResourceListResponse struct is returned by the API for requests.
type FilesResourceListResponse struct {
Items []ResourceInfoResponse `json:"items"`
Limit *uint64 `json:"limit"`
Offset *uint64 `json:"offset"`
}

View File

@@ -1,78 +0,0 @@
package src
import (
"encoding/json"
"strings"
)
// FlatFileListRequest struct client for FlatFileList Request
type FlatFileListRequest struct {
client *Client
HTTPRequest *HTTPRequest
}
// FlatFileListRequestOptions struct - options for request
type FlatFileListRequestOptions struct {
MediaType []MediaType
Limit *uint32
Offset *uint32
Fields []string
PreviewSize *PreviewSize
PreviewCrop *bool
}
// Request get request
func (req *FlatFileListRequest) Request() *HTTPRequest {
return req.HTTPRequest
}
// NewFlatFileListRequest create new FlatFileList Request
func (c *Client) NewFlatFileListRequest(options ...FlatFileListRequestOptions) *FlatFileListRequest {
var parameters = make(map[string]interface{})
if len(options) > 0 {
opt := options[0]
if opt.Limit != nil {
parameters["limit"] = *opt.Limit
}
if opt.Offset != nil {
parameters["offset"] = *opt.Offset
}
if opt.Fields != nil {
parameters["fields"] = strings.Join(opt.Fields, ",")
}
if opt.PreviewSize != nil {
parameters["preview_size"] = opt.PreviewSize.String()
}
if opt.PreviewCrop != nil {
parameters["preview_crop"] = *opt.PreviewCrop
}
if opt.MediaType != nil {
var strMediaTypes = make([]string, len(opt.MediaType))
for i, t := range opt.MediaType {
strMediaTypes[i] = t.String()
}
parameters["media_type"] = strings.Join(strMediaTypes, ",")
}
}
return &FlatFileListRequest{
client: c,
HTTPRequest: createGetRequest(c, "/resources/files", parameters),
}
}
// Exec run FlatFileList Request
func (req *FlatFileListRequest) Exec() (*FilesResourceListResponse, error) {
data, err := req.Request().run(req.client)
if err != nil {
return nil, err
}
var info FilesResourceListResponse
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
if cap(info.Items) == 0 {
info.Items = []ResourceInfoResponse{}
}
return &info, nil
}

View File

@@ -1,24 +0,0 @@
package src
// HTTPRequest struct
type HTTPRequest struct {
Method string
Path string
Parameters map[string]interface{}
Headers map[string][]string
}
func createGetRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
return createRequest(client, "GET", path, params)
}
func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
var headers = make(map[string][]string)
headers["Authorization"] = []string{"OAuth " + client.token}
return &HTTPRequest{
Method: method,
Path: path,
Parameters: parameters,
Headers: headers,
}
}

View File

@@ -1,7 +0,0 @@
package src
// LastUploadedResourceListResponse struct
type LastUploadedResourceListResponse struct {
Items []ResourceInfoResponse `json:"items"`
Limit *uint64 `json:"limit"`
}

View File

@@ -1,74 +0,0 @@
package src
import (
"encoding/json"
"strings"
)
// LastUploadedResourceListRequest struct
type LastUploadedResourceListRequest struct {
client *Client
HTTPRequest *HTTPRequest
}
// LastUploadedResourceListRequestOptions struct
type LastUploadedResourceListRequestOptions struct {
MediaType []MediaType
Limit *uint32
Fields []string
PreviewSize *PreviewSize
PreviewCrop *bool
}
// Request return request
func (req *LastUploadedResourceListRequest) Request() *HTTPRequest {
return req.HTTPRequest
}
// NewLastUploadedResourceListRequest create new LastUploadedResourceList Request
func (c *Client) NewLastUploadedResourceListRequest(options ...LastUploadedResourceListRequestOptions) *LastUploadedResourceListRequest {
var parameters = make(map[string]interface{})
if len(options) > 0 {
opt := options[0]
if opt.Limit != nil {
parameters["limit"] = opt.Limit
}
if opt.Fields != nil {
parameters["fields"] = strings.Join(opt.Fields, ",")
}
if opt.PreviewSize != nil {
parameters["preview_size"] = opt.PreviewSize.String()
}
if opt.PreviewCrop != nil {
parameters["preview_crop"] = opt.PreviewCrop
}
if opt.MediaType != nil {
var strMediaTypes = make([]string, len(opt.MediaType))
for i, t := range opt.MediaType {
strMediaTypes[i] = t.String()
}
parameters["media_type"] = strings.Join(strMediaTypes, ",")
}
}
return &LastUploadedResourceListRequest{
client: c,
HTTPRequest: createGetRequest(c, "/resources/last-uploaded", parameters),
}
}
// Exec run LastUploadedResourceList Request
func (req *LastUploadedResourceListRequest) Exec() (*LastUploadedResourceListResponse, error) {
data, err := req.Request().run(req.client)
if err != nil {
return nil, err
}
var info LastUploadedResourceListResponse
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
if cap(info.Items) == 0 {
info.Items = []ResourceInfoResponse{}
}
return &info, nil
}

View File

@@ -1,144 +0,0 @@
package src
// MediaType struct - media types
type MediaType struct {
mediaType string
}
// Audio - media type
func (m *MediaType) Audio() *MediaType {
return &MediaType{
mediaType: "audio",
}
}
// Backup - media type
func (m *MediaType) Backup() *MediaType {
return &MediaType{
mediaType: "backup",
}
}
// Book - media type
func (m *MediaType) Book() *MediaType {
return &MediaType{
mediaType: "book",
}
}
// Compressed - media type
func (m *MediaType) Compressed() *MediaType {
return &MediaType{
mediaType: "compressed",
}
}
// Data - media type
func (m *MediaType) Data() *MediaType {
return &MediaType{
mediaType: "data",
}
}
// Development - media type
func (m *MediaType) Development() *MediaType {
return &MediaType{
mediaType: "development",
}
}
// Diskimage - media type
func (m *MediaType) Diskimage() *MediaType {
return &MediaType{
mediaType: "diskimage",
}
}
// Document - media type
func (m *MediaType) Document() *MediaType {
return &MediaType{
mediaType: "document",
}
}
// Encoded - media type
func (m *MediaType) Encoded() *MediaType {
return &MediaType{
mediaType: "encoded",
}
}
// Executable - media type
func (m *MediaType) Executable() *MediaType {
return &MediaType{
mediaType: "executable",
}
}
// Flash - media type
func (m *MediaType) Flash() *MediaType {
return &MediaType{
mediaType: "flash",
}
}
// Font - media type
func (m *MediaType) Font() *MediaType {
return &MediaType{
mediaType: "font",
}
}
// Image - media type
func (m *MediaType) Image() *MediaType {
return &MediaType{
mediaType: "image",
}
}
// Settings - media type
func (m *MediaType) Settings() *MediaType {
return &MediaType{
mediaType: "settings",
}
}
// Spreadsheet - media type
func (m *MediaType) Spreadsheet() *MediaType {
return &MediaType{
mediaType: "spreadsheet",
}
}
// Text - media type
func (m *MediaType) Text() *MediaType {
return &MediaType{
mediaType: "text",
}
}
// Unknown - media type
func (m *MediaType) Unknown() *MediaType {
return &MediaType{
mediaType: "unknown",
}
}
// Video - media type
func (m *MediaType) Video() *MediaType {
return &MediaType{
mediaType: "video",
}
}
// Web - media type
func (m *MediaType) Web() *MediaType {
return &MediaType{
mediaType: "web",
}
}
// String - media type
func (m *MediaType) String() string {
return m.mediaType
}

View File

@@ -1,21 +0,0 @@
package src
import (
"net/url"
)
// Mkdir will make specified folder on Yandex Disk
func (c *Client) Mkdir(remotePath string) (int, string, error) {
values := url.Values{}
values.Add("path", remotePath) // only one current folder will be created. Not all the folders in the path.
urlPath := "/v1/disk/resources?" + values.Encode()
fullURL := RootAddr
if urlPath[:1] != "/" {
fullURL += "/" + urlPath
} else {
fullURL += urlPath
}
return c.PerformMkdir(fullURL)
}

View File

@@ -1,35 +0,0 @@
package src
import (
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformDelete does the actual delete via DELETE request.
func (c *Client) PerformDelete(url string) error {
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
//set access token and headers
c.setRequestScope(req)
resp, err := c.HTTPClient.Do(req)
if err != nil {
return err
}
//204 - resource deleted.
//202 - folder not empty, content will be deleted soon (async delete).
if resp.StatusCode != 204 && resp.StatusCode != 202 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body))
}
return nil
}

View File

@@ -1,40 +0,0 @@
package src
import (
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformDownload does the actual download via unscoped GET request.
func (c *Client) PerformDownload(url string, headers map[string]string) (out io.ReadCloser, err error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
// Set any extra headers
for k, v := range headers {
req.Header.Set(k, v)
}
//c.setRequestScope(req)
resp, err := c.HTTPClient.Do(req)
if err != nil {
return nil, err
}
_, isRanging := req.Header["Range"]
if !(resp.StatusCode == http.StatusOK || (isRanging && resp.StatusCode == http.StatusPartialContent)) {
defer CheckClose(resp.Body, &err)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body))
}
return resp.Body, err
}

View File

@@ -1,34 +0,0 @@
package src
import (
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformMkdir does the actual mkdir via PUT request.
func (c *Client) PerformMkdir(url string) (int, string, error) {
req, err := http.NewRequest("PUT", url, nil)
if err != nil {
return 0, "", err
}
//set access token and headers
c.setRequestScope(req)
resp, err := c.HTTPClient.Do(req)
if err != nil {
return 0, "", err
}
if resp.StatusCode != 201 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return 0, "", err
}
//third parameter is the json error response body
return resp.StatusCode, string(body), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body))
}
return resp.StatusCode, "", nil
}

View File

@@ -1,38 +0,0 @@
package src
//from yadisk
import (
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformUpload does the actual upload via unscoped PUT request.
func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (err error) {
req, err := http.NewRequest("PUT", url, data)
if err != nil {
return err
}
req.Header.Set("Content-Type", contentType)
//c.setRequestScope(req)
resp, err := c.HTTPClient.Do(req)
if err != nil {
return err
}
defer CheckClose(resp.Body, &err)
if resp.StatusCode != 201 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body))
}
return nil
}

View File

@@ -1,75 +0,0 @@
package src
import "fmt"
// PreviewSize struct
type PreviewSize struct {
size string
}
// PredefinedSizeS - set preview size
func (s *PreviewSize) PredefinedSizeS() *PreviewSize {
return &PreviewSize{
size: "S",
}
}
// PredefinedSizeM - set preview size
func (s *PreviewSize) PredefinedSizeM() *PreviewSize {
return &PreviewSize{
size: "M",
}
}
// PredefinedSizeL - set preview size
func (s *PreviewSize) PredefinedSizeL() *PreviewSize {
return &PreviewSize{
size: "L",
}
}
// PredefinedSizeXL - set preview size
func (s *PreviewSize) PredefinedSizeXL() *PreviewSize {
return &PreviewSize{
size: "XL",
}
}
// PredefinedSizeXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXL() *PreviewSize {
return &PreviewSize{
size: "XXL",
}
}
// PredefinedSizeXXXL - set preview size
func (s *PreviewSize) PredefinedSizeXXXL() *PreviewSize {
return &PreviewSize{
size: "XXXL",
}
}
// ExactWidth - set preview size
func (s *PreviewSize) ExactWidth(width uint32) *PreviewSize {
return &PreviewSize{
size: fmt.Sprintf("%dx", width),
}
}
// ExactHeight - set preview size
func (s *PreviewSize) ExactHeight(height uint32) *PreviewSize {
return &PreviewSize{
size: fmt.Sprintf("x%d", height),
}
}
// ExactSize - set preview size
func (s *PreviewSize) ExactSize(width uint32, height uint32) *PreviewSize {
return &PreviewSize{
size: fmt.Sprintf("%dx%d", width, height),
}
}
func (s *PreviewSize) String() string {
return s.size
}

View File

@@ -1,19 +0,0 @@
package src
//ResourceInfoResponse struct is returned by the API for metedata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]interface{} `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size uint64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
}

View File

@@ -1,45 +0,0 @@
package src
import "encoding/json"
// ResourceInfoRequest struct
type ResourceInfoRequest struct {
client *Client
HTTPRequest *HTTPRequest
}
// Request of ResourceInfoRequest
func (req *ResourceInfoRequest) Request() *HTTPRequest {
return req.HTTPRequest
}
// NewResourceInfoRequest create new ResourceInfo Request
func (c *Client) NewResourceInfoRequest(path string, options ...ResourceInfoRequestOptions) *ResourceInfoRequest {
return &ResourceInfoRequest{
client: c,
HTTPRequest: createResourceInfoRequest(c, "/resources", path, options...),
}
}
// Exec run ResourceInfo Request
func (req *ResourceInfoRequest) Exec() (*ResourceInfoResponse, error) {
data, err := req.Request().run(req.client)
if err != nil {
return nil, err
}
var info ResourceInfoResponse
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
if info.CustomProperties == nil {
info.CustomProperties = make(map[string]interface{})
}
if info.Embedded != nil {
if cap(info.Embedded.Items) == 0 {
info.Embedded.Items = []ResourceInfoResponse{}
}
}
return &info, nil
}

View File

@@ -1,33 +0,0 @@
package src
import "strings"
func createResourceInfoRequest(c *Client,
apiPath string,
path string,
options ...ResourceInfoRequestOptions) *HTTPRequest {
var parameters = make(map[string]interface{})
parameters["path"] = path
if len(options) > 0 {
opt := options[0]
if opt.SortMode != nil {
parameters["sort"] = opt.SortMode.String()
}
if opt.Limit != nil {
parameters["limit"] = *opt.Limit
}
if opt.Offset != nil {
parameters["offset"] = *opt.Offset
}
if opt.Fields != nil {
parameters["fields"] = strings.Join(opt.Fields, ",")
}
if opt.PreviewSize != nil {
parameters["preview_size"] = opt.PreviewSize.String()
}
if opt.PreviewCrop != nil {
parameters["preview_crop"] = *opt.PreviewCrop
}
}
return createGetRequest(c, apiPath, parameters)
}

View File

@@ -1,11 +0,0 @@
package src
// ResourceInfoRequestOptions struct
type ResourceInfoRequestOptions struct {
SortMode *SortMode
Limit *uint32
Offset *uint32
Fields []string
PreviewSize *PreviewSize
PreviewCrop *bool
}

View File

@@ -1,12 +0,0 @@
package src
// ResourceListResponse struct
type ResourceListResponse struct {
Sort *SortMode `json:"sort"`
PublicKey string `json:"public_key"`
Items []ResourceInfoResponse `json:"items"`
Path string `json:"path"`
Limit *uint64 `json:"limit"`
Offset *uint64 `json:"offset"`
Total *uint64 `json:"total"`
}

View File

@@ -1,79 +0,0 @@
package src
import "strings"
// SortMode struct - sort mode
type SortMode struct {
mode string
}
// Default - sort mode
func (m *SortMode) Default() *SortMode {
return &SortMode{
mode: "",
}
}
// ByName - sort mode
func (m *SortMode) ByName() *SortMode {
return &SortMode{
mode: "name",
}
}
// ByPath - sort mode
func (m *SortMode) ByPath() *SortMode {
return &SortMode{
mode: "path",
}
}
// ByCreated - sort mode
func (m *SortMode) ByCreated() *SortMode {
return &SortMode{
mode: "created",
}
}
// ByModified - sort mode
func (m *SortMode) ByModified() *SortMode {
return &SortMode{
mode: "modified",
}
}
// BySize - sort mode
func (m *SortMode) BySize() *SortMode {
return &SortMode{
mode: "size",
}
}
// Reverse - sort mode
func (m *SortMode) Reverse() *SortMode {
if strings.HasPrefix(m.mode, "-") {
return &SortMode{
mode: m.mode[1:],
}
}
return &SortMode{
mode: "-" + m.mode,
}
}
func (m *SortMode) String() string {
return m.mode
}
// UnmarshalJSON sort mode
func (m *SortMode) UnmarshalJSON(value []byte) error {
if value == nil || len(value) == 0 {
m.mode = ""
return nil
}
m.mode = string(value)
if strings.HasPrefix(m.mode, "\"") && strings.HasSuffix(m.mode, "\"") {
m.mode = m.mode[1 : len(m.mode)-1]
}
return nil
}

View File

@@ -1,45 +0,0 @@
package src
import "encoding/json"
// TrashResourceInfoRequest struct
type TrashResourceInfoRequest struct {
client *Client
HTTPRequest *HTTPRequest
}
// Request of TrashResourceInfoRequest struct
func (req *TrashResourceInfoRequest) Request() *HTTPRequest {
return req.HTTPRequest
}
// NewTrashResourceInfoRequest create new TrashResourceInfo Request
func (c *Client) NewTrashResourceInfoRequest(path string, options ...ResourceInfoRequestOptions) *TrashResourceInfoRequest {
return &TrashResourceInfoRequest{
client: c,
HTTPRequest: createResourceInfoRequest(c, "/trash/resources", path, options...),
}
}
// Exec run TrashResourceInfo Request
func (req *TrashResourceInfoRequest) Exec() (*ResourceInfoResponse, error) {
data, err := req.Request().run(req.client)
if err != nil {
return nil, err
}
var info ResourceInfoResponse
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
if info.CustomProperties == nil {
info.CustomProperties = make(map[string]interface{})
}
if info.Embedded != nil {
if cap(info.Embedded.Items) == 0 {
info.Embedded.Items = []ResourceInfoResponse{}
}
}
return &info, nil
}

backend/yandex/api/types.go Normal file
View File

@@ -0,0 +1,157 @@
package api
import (
"fmt"
"strings"
)
// DiskInfo contains disk metadata
type DiskInfo struct {
TotalSpace int64 `json:"total_space"`
UsedSpace int64 `json:"used_space"`
TrashSize int64 `json:"trash_size"`
}
// ResourceInfoRequestOptions struct
type ResourceInfoRequestOptions struct {
SortMode *SortMode
Limit uint64
Offset uint64
Fields []string
}
//ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]interface{} `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
}
// ResourceListResponse struct
type ResourceListResponse struct {
Sort *SortMode `json:"sort"`
PublicKey string `json:"public_key"`
Items []ResourceInfoResponse `json:"items"`
Path string `json:"path"`
Limit *uint64 `json:"limit"`
Offset *uint64 `json:"offset"`
Total *uint64 `json:"total"`
}
// AsyncInfo struct is returned by the API for various async operations.
type AsyncInfo struct {
HRef string `json:"href"`
Method string `json:"method"`
Templated bool `json:"templated"`
}
// AsyncStatus is returned when requesting the status of an async operation. Possible values: in-progress, success, failure
type AsyncStatus struct {
Status string `json:"status"`
}
//CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
CustomProperties map[string]interface{} `json:"custom_properties"`
}
// SortMode struct - sort mode
type SortMode struct {
mode string
}
// Default - sort mode
func (m *SortMode) Default() *SortMode {
return &SortMode{
mode: "",
}
}
// ByName - sort mode
func (m *SortMode) ByName() *SortMode {
return &SortMode{
mode: "name",
}
}
// ByPath - sort mode
func (m *SortMode) ByPath() *SortMode {
return &SortMode{
mode: "path",
}
}
// ByCreated - sort mode
func (m *SortMode) ByCreated() *SortMode {
return &SortMode{
mode: "created",
}
}
// ByModified - sort mode
func (m *SortMode) ByModified() *SortMode {
return &SortMode{
mode: "modified",
}
}
// BySize - sort mode
func (m *SortMode) BySize() *SortMode {
return &SortMode{
mode: "size",
}
}
// Reverse - sort mode
func (m *SortMode) Reverse() *SortMode {
if strings.HasPrefix(m.mode, "-") {
return &SortMode{
mode: m.mode[1:],
}
}
return &SortMode{
mode: "-" + m.mode,
}
}
func (m *SortMode) String() string {
return m.mode
}
// UnmarshalJSON sort mode
func (m *SortMode) UnmarshalJSON(value []byte) error {
if value == nil || len(value) == 0 {
m.mode = ""
return nil
}
m.mode = string(value)
if strings.HasPrefix(m.mode, "\"") && strings.HasSuffix(m.mode, "\"") {
m.mode = m.mode[1 : len(m.mode)-1]
}
return nil
}
// ErrorResponse represents erroneous API response.
// Implements go's built in `error`.
type ErrorResponse struct {
ErrorName string `json:"error"`
Description string `json:"description"`
Message string `json:"message"`
StatusCode int `json:""`
}
func (e *ErrorResponse) Error() string {
return fmt.Sprintf("[%d - %s] %s (%s)", e.StatusCode, e.ErrorName, e.Description, e.Message)
}

View File

@@ -1,71 +0,0 @@
package src
//from yadisk
import (
"encoding/json"
"io"
"net/url"
"strconv"
)
// UploadResponse struct is returned by the API for upload request.
type UploadResponse struct {
HRef string `json:"href"`
Method string `json:"method"`
Templated bool `json:"templated"`
}
// Upload will put specified data to Yandex.Disk.
func (c *Client) Upload(data io.Reader, remotePath string, overwrite bool, contentType string) error {
ur, err := c.UploadRequest(remotePath, overwrite)
if err != nil {
return err
}
return c.PerformUpload(ur.HRef, data, contentType)
}
// UploadRequest will make an upload request and return a URL to upload data to.
func (c *Client) UploadRequest(remotePath string, overwrite bool) (ur *UploadResponse, err error) {
values := url.Values{}
values.Add("path", remotePath)
values.Add("overwrite", strconv.FormatBool(overwrite))
req, err := c.scopedRequest("GET", "/v1/disk/resources/upload?"+values.Encode(), nil)
if err != nil {
return nil, err
}
resp, err := c.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if err := CheckAPIError(resp); err != nil {
return nil, err
}
defer CheckClose(resp.Body, &err)
ur, err = ParseUploadResponse(resp.Body)
if err != nil {
return nil, err
}
return ur, nil
}
// ParseUploadResponse tries to read and parse UploadResponse struct.
func ParseUploadResponse(data io.Reader) (*UploadResponse, error) {
dec := json.NewDecoder(data)
var ur UploadResponse
if err := dec.Decode(&ur); err == io.EOF {
// ok
} else if err != nil {
return nil, err
}
// TODO: check if there is any trash data after JSON and crash if there is.
return &ur, nil
}
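A minimal sketch of how this (now removed) two-phase flow was driven, assuming a configured Client c; the local file and remote path are hypothetical:

f, err := os.Open("report.pdf")
if err != nil {
    return err
}
defer func() { _ = f.Close() }()
// UploadRequest obtains the href, PerformUpload then PUTs the data to it.
err = c.Upload(f, "disk:/backup/report.pdf", true, "application/pdf")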

File diff suppressed because it is too large

View File

@@ -63,7 +63,9 @@ var osarches = []string{
// Special environment flags for a given arch
var archFlags = map[string][]string{
"386": {"GO386=387"},
"386": {"GO386=387"},
"mips": {"GOMIPS=softfloat"},
"mipsle": {"GOMIPS=softfloat"},
}
// runEnv - run a shell command with env
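A hedged sketch of how per-arch flags like these are applied when shelling out to go build; the target and wiring are assumptions, not this script's exact code:

env := os.Environ()
env = append(env, "GOOS=linux", "GOARCH=mips")
env = append(env, archFlags["mips"]...) // i.e. GOMIPS=softfloat
build := exec.Command("go", "build", "./...")
build.Env = env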

bin/test_independence.go (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
// +build ignore
// Test that the tests in the suite passed in are independent
package main
import (
"flag"
"log"
"os"
"os/exec"
"regexp"
)
var matchLine = regexp.MustCompile(`(?m)^=== RUN\s*(TestIntegration/\S*)\s*$`)
// run the test pass in and grep out the test names
func findTests(packageToTest string) (tests []string) {
cmd := exec.Command("go", "test", "-v", packageToTest)
out, err := cmd.CombinedOutput()
if err != nil {
_, _ = os.Stderr.Write(out)
log.Fatal(err)
}
results := matchLine.FindAllSubmatch(out, -1)
if results == nil {
log.Fatal("No tests found")
}
for _, line := range results {
tests = append(tests, string(line[1]))
}
return tests
}
// run the test passed in with the -run passed in
func runTest(packageToTest string, testName string) {
cmd := exec.Command("go", "test", "-v", packageToTest, "-run", "^"+testName+"$")
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("%s FAILED ------------------", testName)
_, _ = os.Stderr.Write(out)
log.Printf("%s FAILED ------------------", testName)
} else {
log.Printf("%s OK", testName)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Syntax: %s <test_to_run>", os.Args[0])
}
packageToTest := args[0]
testNames := findTests(packageToTest)
// fmt.Printf("%s\n", testNames)
for _, testName := range testNames {
runTest(packageToTest, testName)
}
}
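Because of the ignore build tag this helper is run directly with go run; the package path below is hypothetical:

go run bin/test_independence.go ./fstest/fstests

It runs the suite once to harvest the TestIntegration/... names, then re-runs each one in isolation, so a test that only passes when run after another is reported as FAILED.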

View File

@@ -51,7 +51,7 @@ var (
errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error")
errorNotEnoughArguments = errors.New("not enough arguments")
errorTooManyArguents = errors.New("too many arguments")
errorTooManyArguments = errors.New("too many arguments")
)
const (
@@ -294,14 +294,12 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
if len(args) < MinArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
// os.Exit(1)
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
resolveExitCode(errorNotEnoughArguments)
} else if len(args) > MaxArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
// os.Exit(1)
resolveExitCode(errorTooManyArguents)
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
resolveExitCode(errorTooManyArguments)
}
}
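A brief sketch of how a command wires in this check; the 1..2 bounds are hypothetical:

Run: func(command *cobra.Command, args []string) {
    cmd.CheckArgs(1, 2, command, args) // prints usage and exits outside 1..2 args
    // the command body only runs with a validated argument count
},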

View File

@@ -53,7 +53,6 @@ func mountOptions(device string, mountpoint string) (options []string) {
// OSX options
if runtime.GOOS == "darwin" {
options = append(options, "-o", "volname="+mountlib.VolumeName)
if mountlib.NoAppleDouble {
options = append(options, "-o", "noappledouble")
}
@@ -70,6 +69,11 @@ func mountOptions(device string, mountpoint string) (options []string) {
options = append(options, "--FileSystemName=rclone")
}
if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
if mountlib.VolumeName != "" {
options = append(options, "-o", "volname="+mountlib.VolumeName)
}
}
if mountlib.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}

View File

@@ -51,6 +51,17 @@ written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when copying a small number of files into a large destination
can speed transfers up greatly.
For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have
changed recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -138,6 +138,7 @@ func (r *results) checkChar(c rune) {
escape := false
if err != nil {
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
escape = true
} else {
fs.Infof(r.f, "OK writing file 0x%02X", c)
}

View File

@@ -7,7 +7,7 @@ package mountlib
import (
"log"
"github.com/sevlyar/go-daemon"
daemon "github.com/sevlyar/go-daemon"
)
func startBackgroundMode() bool {

View File

@@ -147,7 +147,7 @@ systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone ` + commandName + `
can't use retries in the same way without making local copies of the
uploads. Look at the [file caching](#file-caching)
for solutions to make ` + commandName + ` mount more reliable.
for solutions to make ` + commandName + ` more reliable.
### Attribute caching

View File

@@ -37,6 +37,11 @@ into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when moving a small number of files into a large destination
can speed transfers up greatly.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
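By analogy with the copy example earlier in this diff, a hedged illustration of the fast path for move (test with --dry-run first, as the warning above says):

rclone move --max-age 24h --no-traverse /path/to/src remote: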

View File

@@ -424,20 +424,13 @@ func (u *UI) removeEntry(pos int) {
func (u *UI) delete() {
dirPos := u.sortPerm[u.dirPosMap[u.path].entry]
entry := u.entries[dirPos]
file := false
d, _ := u.d.GetDir(dirPos)
if d == nil {
file = true
}
u.boxMenu = []string{"cancel", "confirm"}
if file {
if obj, isFile := entry.(fs.Object); isFile {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
}
err := f.Rmdir(entry.String())
err := operations.DeleteFile(obj)
if err != nil {
return "", err
}

View File

@@ -27,6 +27,11 @@ const (
//
// It returns a func which should be called to stop the stats.
func startProgress() func() {
err := initTerminal()
if err != nil {
fs.Errorf(nil, "Failed to start progress: %v", err)
return func() {}
}
stopStats := make(chan struct{})
oldLogPrint := fs.LogPrint
if !log.Redirected() {
@@ -51,6 +56,7 @@ func startProgress() func() {
printProgress("")
case <-stopStats:
ticker.Stop()
printProgress("")
fs.LogPrint = oldLogPrint
fmt.Println("")
return

View File

@@ -4,6 +4,10 @@ package cmd
import "os"
func initTerminal() error {
return nil
}
func writeToTerminal(b []byte) {
_, _ = os.Stdout.Write(b)
}

View File

@@ -5,22 +5,31 @@ package cmd
import (
"fmt"
"os"
"sync"
"syscall"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
"github.com/pkg/errors"
)
var (
initAnsiParser sync.Once
ansiParser *ansiterm.AnsiParser
ansiParser *ansiterm.AnsiParser
)
func initTerminal() error {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
if winEventHandler == nil {
err := syscall.GetLastError()
if err == nil {
err = errors.New("initialization failed")
}
return errors.Wrap(err, "windows terminal")
}
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
return nil
}
func writeToTerminal(b []byte) {
initAnsiParser.Do(func() {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
})
// Remove all non-ASCII characters until this is fixed
// https://github.com/Azure/go-ansiterm/issues/26
r := []rune(string(b))

View File

@@ -126,7 +126,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
}
// Make the entries for display
directory := serve.NewDirectory(dirRemote)
directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
for _, node := range dirEntries {
directory.AddEntry(node.Path(), node.IsDir())
}

View File

@@ -4,14 +4,18 @@ package httplib
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"html/template"
"io/ioutil"
"log"
"net"
"net/http"
"strings"
"time"
auth "github.com/abbot/go-http-auth"
"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
@@ -105,8 +109,9 @@ type Server struct {
waitChan chan struct{} // for waiting on the listener to close
httpServer *http.Server
basicPassHashed string
useSSL bool // if server is configured for SSL/TLS
usingAuth bool // set if authentication is configured
useSSL bool // if server is configured for SSL/TLS
usingAuth bool // set if authentication is configured
HTMLTemplate *template.Template // HTML template for web interface
}
// singleUserProvider provides the encrypted password for a single user
@@ -143,7 +148,28 @@ func NewServer(handler http.Handler, opt *Options) *Server {
secretProvider = s.singleUserProvider
}
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
handler = auth.JustCheck(authenticator, handler.ServeHTTP)
oldHandler := handler
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if username := authenticator.CheckAuth(r); username == "" {
authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
if authHeader != "" {
s := strings.SplitN(authHeader, " ", 2)
var userName = "UNKNOWN"
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
userName = strings.SplitN(string(b), ":", 2)[0]
}
}
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
} else {
fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
}
authenticator.RequireAuth(w, r)
} else {
oldHandler.ServeHTTP(w, r)
}
})
s.usingAuth = true
}
@@ -182,6 +208,12 @@ func NewServer(handler http.Handler, opt *Options) *Server {
s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
htmlTemplate, templateErr := data.GetTemplate()
if templateErr != nil {
log.Fatal(templateErr)
}
s.HTMLTemplate = htmlTemplate
return s
}

View File

@@ -0,0 +1,22 @@
// +build ignore
package main
import (
"log"
"net/http"
"github.com/shurcooL/vfsgen"
)
func main() {
var AssetDir http.FileSystem = http.Dir("./templates")
err := vfsgen.Generate(AssetDir, vfsgen.Options{
PackageName: "data",
BuildTags: "!dev",
VariableName: "Assets",
})
if err != nil {
log.Fatalln(err)
}
}
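After editing templates/index.html the embedded copy is refreshed through the generate directive in the data package (see the go:generate line later in this diff); the import path is taken from the files above:

go generate github.com/ncw/rclone/cmd/serve/httplib/serve/data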

View File

@@ -0,0 +1,186 @@
// Code generated by vfsgen; DO NOT EDIT.
// +build !dev
package data
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
"time"
)
// Assets statically implements the virtual filesystem provided to vfsgen.
var Assets = func() http.FileSystem {
fs := vfsgen۰FS{
"/": &vfsgen۰DirInfo{
name: "/",
modTime: time.Date(2018, 12, 16, 6, 54, 42, 894445775, time.UTC),
},
"/index.html": &vfsgen۰CompressedFileInfo{
name: "index.html",
modTime: time.Date(2018, 12, 16, 6, 54, 42, 790442328, time.UTC),
uncompressedSize: 226,
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8f\x31\xcf\x83\x20\x10\x86\x77\x7e\xc5\x7d\xc4\xf5\x93\xb8\x35\x0d\xb0\xb4\x6e\x26\x6d\x1a\x3b\x74\x3c\xeb\x29\x24\x4a\x13\xa4\x43\x43\xf8\xef\x0d\xea\xd4\x09\xee\x79\xef\x9e\xcb\xc9\xbf\xf3\xe5\xd4\x3e\xae\x35\x98\x30\x4f\x9a\xc9\xfc\xc0\x84\x6e\x54\x9c\x1c\xcf\x80\xb0\xd7\x4c\xce\x14\x10\x9e\x06\xfd\x42\x41\xf1\x77\x18\xfe\x0f\x39\x0d\x36\x4c\xa4\x63\x84\xb2\xcd\x3f\x48\x49\x8a\x8d\x31\x29\xf6\xd1\xee\xd5\x7f\xb2\xa8\xfa\xe9\x33\x95\x66\x31\x82\x47\x37\x12\x14\x16\x8e\x0a\xca\xda\x05\x6f\x69\xc9\x39\x82\xf1\x34\x28\x1e\x23\x14\xb6\xbc\xdf\x1a\x48\x89\xeb\xad\x6a\x08\x87\xd5\x81\x5a\x76\x1e\xc4\x2a\x22\xd7\xaf\x6c\xdf\x27\xb6\x8b\xbe\x01\x00\x00\xff\xff\x92\x2e\x35\x75\xe2\x00\x00\x00"),
},
}
fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
fs["/index.html"].(os.FileInfo),
}
return fs
}()
type vfsgen۰FS map[string]interface{}
func (fs vfsgen۰FS) Open(path string) (http.File, error) {
path = pathpkg.Clean("/" + path)
f, ok := fs[path]
if !ok {
return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
}
switch f := f.(type) {
case *vfsgen۰CompressedFileInfo:
gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent))
if err != nil {
// This should never happen because we generate the gzip bytes such that they are always valid.
panic("unexpected error reading own gzip compressed bytes: " + err.Error())
}
return &vfsgen۰CompressedFile{
vfsgen۰CompressedFileInfo: f,
gr: gr,
}, nil
case *vfsgen۰DirInfo:
return &vfsgen۰Dir{
vfsgen۰DirInfo: f,
}, nil
default:
// This should never happen because we generate only the above types.
panic(fmt.Sprintf("unexpected type %T", f))
}
}
// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file.
type vfsgen۰CompressedFileInfo struct {
name string
modTime time.Time
compressedContent []byte
uncompressedSize int64
}
func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) {
return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
}
func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil }
func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte {
return f.compressedContent
}
func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name }
func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize }
func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 }
func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime }
func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false }
func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil }
// vfsgen۰CompressedFile is an opened compressedFile instance.
type vfsgen۰CompressedFile struct {
*vfsgen۰CompressedFileInfo
gr *gzip.Reader
grPos int64 // Actual gr uncompressed position.
seekPos int64 // Seek uncompressed position.
}
func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
if f.grPos > f.seekPos {
// Rewind to beginning.
err = f.gr.Reset(bytes.NewReader(f.compressedContent))
if err != nil {
return 0, err
}
f.grPos = 0
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}
f.grPos = f.seekPos
}
n, err = f.gr.Read(p)
f.grPos += int64(n)
f.seekPos = f.grPos
return n, err
}
func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
f.seekPos = 0 + offset
case io.SeekCurrent:
f.seekPos += offset
case io.SeekEnd:
f.seekPos = f.uncompressedSize + offset
default:
panic(fmt.Errorf("invalid whence value: %v", whence))
}
return f.seekPos, nil
}
func (f *vfsgen۰CompressedFile) Close() error {
return f.gr.Close()
}
// vfsgen۰DirInfo is a static definition of a directory.
type vfsgen۰DirInfo struct {
name string
modTime time.Time
entries []os.FileInfo
}
func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
return 0, fmt.Errorf("cannot Read from directory %s", d.name)
}
func (d *vfsgen۰DirInfo) Close() error { return nil }
func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }
func (d *vfsgen۰DirInfo) Name() string { return d.name }
func (d *vfsgen۰DirInfo) Size() int64 { return 0 }
func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir }
func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
func (d *vfsgen۰DirInfo) IsDir() bool { return true }
func (d *vfsgen۰DirInfo) Sys() interface{} { return nil }
// vfsgen۰Dir is an opened dir instance.
type vfsgen۰Dir struct {
*vfsgen۰DirInfo
pos int // Position within entries for Seek and Readdir.
}
func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
if offset == 0 && whence == io.SeekStart {
d.pos = 0
return 0, nil
}
return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
}
func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
if d.pos >= len(d.entries) && count > 0 {
return nil, io.EOF
}
if count <= 0 || count > len(d.entries)-d.pos {
count = len(d.entries) - d.pos
}
e := d.entries[d.pos : d.pos+count]
d.pos += count
return e, nil
}
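A hedged sketch of reading the embedded template back out of the generated filesystem; only Assets and /index.html from above are assumed:

f, err := Assets.Open("/index.html")
if err != nil {
    log.Fatal(err)
}
defer func() { _ = f.Close() }()
b, err := ioutil.ReadAll(f) // Read transparently gunzips the embedded bytes
if err != nil {
    log.Fatal(err)
}
fmt.Printf("%d bytes of HTML\n", len(b))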

View File

@@ -0,0 +1,36 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// GetTemplate returns the HTML template for serving directories via HTTP
func GetTemplate() (tpl *template.Template, err error) {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
var templateString = string(templateBytes)
tpl, err = template.New("index").Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}
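A short sketch of rendering the template, mirroring how serve.Directory uses it further down this diff; page is any hypothetical value with Title and Entries fields:

tpl, err := data.GetTemplate()
if err != nil {
    return err
}
if err := tpl.Execute(os.Stdout, page); err != nil {
    return err
}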

View File

@@ -0,0 +1,11 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>

View File

@@ -21,17 +21,19 @@ type DirEntry struct {
// Directory represents a directory
type Directory struct {
DirRemote string
Title string
Entries []DirEntry
Query string
DirRemote string
Title string
Entries []DirEntry
Query string
HTMLTemplate *template.Template
}
// NewDirectory makes an empty Directory
func NewDirectory(dirRemote string) *Directory {
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
d := &Directory{
DirRemote: dirRemote,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
DirRemote: dirRemote,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
HTMLTemplate: htmlTemplate,
}
return d
}
@@ -77,26 +79,10 @@ func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
defer accounting.Stats.DoneTransferring(d.DirRemote, true)
fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
err := indexTemplate.Execute(w, d)
err := d.HTMLTemplate.Execute(w, d)
if err != nil {
Error(d.DirRemote, w, "Failed to render template", err)
return
}
}
// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`
// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))

View File

@@ -2,23 +2,32 @@ package serve
import (
"errors"
"html/template"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func GetTemplate(t *testing.T) *template.Template {
htmlTemplate, err := data.GetTemplate()
require.NoError(t, err)
return htmlTemplate
}
func TestNewDirectory(t *testing.T) {
d := NewDirectory("z")
d := NewDirectory("z", GetTemplate(t))
assert.Equal(t, "z", d.DirRemote)
assert.Equal(t, "Directory listing of /z", d.Title)
}
func TestSetQuery(t *testing.T) {
d := NewDirectory("z")
d := NewDirectory("z", GetTemplate(t))
assert.Equal(t, "", d.Query)
d.SetQuery(url.Values{"potato": []string{"42"}})
assert.Equal(t, "?potato=42", d.Query)
@@ -27,7 +36,7 @@ func TestSetQuery(t *testing.T) {
}
func TestAddEntry(t *testing.T) {
var d = NewDirectory("z")
var d = NewDirectory("z", GetTemplate(t))
d.AddEntry("", true)
d.AddEntry("dir", true)
d.AddEntry("a/b/c/d.txt", false)
@@ -42,7 +51,7 @@ func TestAddEntry(t *testing.T) {
}, d.Entries)
// Now test with a query parameter
d = NewDirectory("z").SetQuery(url.Values{"potato": []string{"42"}})
d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
d.AddEntry("file", false)
d.AddEntry("dir", true)
assert.Equal(t, []DirEntry{
@@ -62,7 +71,7 @@ func TestError(t *testing.T) {
}
func TestServe(t *testing.T) {
d := NewDirectory("aDirectory")
d := NewDirectory("aDirectory", GetTemplate(t))
d.AddEntry("file", false)
d.AddEntry("dir", true)

View File

@@ -1,4 +1,7 @@
// Package restic serves a remote suitable for use with restic
// +build go1.9
package restic
import (

View File

@@ -1,3 +1,5 @@
// +build go1.9
package restic
import (

View File

@@ -1,5 +1,8 @@
// Serve restic tests set up a server and run the integration tests
// for restic against it.
// +build go1.9
package restic
import (

View File

@@ -0,0 +1,11 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build !go1.9
package restic
import "github.com/spf13/cobra"
// Command definition is nil to show not implemented
var Command *cobra.Command = nil

View File

@@ -1,3 +1,5 @@
// +build go1.9
package restic
import (

View File

@@ -1,4 +1,4 @@
//+build !go1.10
//+build go1.9,!go1.10
// Fallback deadline setting for pre go1.10

View File

@@ -13,8 +13,12 @@ import (
func init() {
Command.AddCommand(http.Command)
Command.AddCommand(webdav.Command)
Command.AddCommand(restic.Command)
if webdav.Command != nil {
Command.AddCommand(webdav.Command)
}
if restic.Command != nil {
Command.AddCommand(restic.Command)
}
if ftp.Command != nil {
Command.AddCommand(ftp.Command)
}

View File

@@ -1,3 +1,5 @@
//+build go1.9
package webdav
import (

View File

@@ -3,7 +3,7 @@
//
// We skip tests on platforms with troublesome character mappings
//+build !windows,!darwin
//+build !windows,!darwin,go1.9
package webdav

Some files were not shown because too many files have changed in this diff