mirror of https://github.com/rclone/rclone.git synced 2026-01-05 10:03:17 +00:00

Compare commits


103 Commits

Author SHA1 Message Date
Nick Craig-Wood
d6778c9d19 mount: make directories show with non zero size
See: https://forum.rclone.org/t/empty-folder-when-rclone-mount-used-as-external-storage-of-nextcloud/9251
2019-03-25 11:21:26 +00:00
Nick Craig-Wood
6e70d88f54 swift: work around token expiry on CEPH
This implements the Expiry interface so token expiry works properly

This change makes sure that the following change from the swift library
works correctly with rclone's custom authenticator.

> Renew the token 60s before the expiry time
>
> The v2 and v3 auth schemes both return the expiry time of the token,
> so instead of waiting for a 401 error, renew the token 60s before this
> time.
>
> This makes transfers more efficient and also works around a bug in
> CEPH which returns 403 instead of 401 when the token expires.
>
> http://tracker.ceph.com/issues/22223
2019-03-18 13:30:59 +00:00
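
A minimal standalone sketch of the renew-early idea (the token type and names here are illustrative, not the swift library's actual API): consider a token expired 60s before the server-reported expiry time, so it is renewed before CEPH starts answering 403.

    package main

    import (
        "fmt"
        "time"
    )

    // renew this long before the server-reported expiry time
    const expiryMargin = 60 * time.Second

    // token is a stand-in for an auth token returned by v2/v3 auth.
    type token struct {
        value     string
        expiresAt time.Time
    }

    // needsRenewal reports whether the token is within expiryMargin of
    // its expiry and should be renewed now rather than after a 401/403.
    func (t *token) needsRenewal() bool {
        return time.Until(t.expiresAt) <= expiryMargin
    }

    func main() {
        t := &token{value: "secret", expiresAt: time.Now().Add(45 * time.Second)}
        if t.needsRenewal() {
            fmt.Println("renewing token before the server rejects it")
        }
    }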
Nick Craig-Wood
595fea757d vendor: update github.com/ncw/swift to bring in Expires changes 2019-03-18 13:30:59 +00:00
Nick Craig-Wood
bb80586473 bin/get-github-release: fetch the most recent not the least recent 2019-03-18 11:29:37 +00:00
Nick Craig-Wood
0d475958c7 Fix errors discovered with go vet nilness tool 2019-03-18 11:23:00 +00:00
Nick Craig-Wood
2728948fb0 Add xopez to contributors 2019-03-18 11:04:10 +00:00
Nick Craig-Wood
3756f211b5 Add Danil Semelenov to contributors 2019-03-18 11:04:10 +00:00
xopez
2faf2aed80 docs: Update Copyright to current Year 2019-03-18 11:03:45 +00:00
Nick Craig-Wood
1bd8183af1 build: use matrix build for travis
This makes the build more efficient, makes the .travis.yml file more
comprehensible, and reduces the Makefile spaghetti.

Windows support is commented out for the moment as it isn't very
reliable yet.
2019-03-17 14:58:18 +00:00
Nick Craig-Wood
5aa706831f b2: ignore already_hidden error on remove
Sometimes (possibly through eventual consistency) b2 returns an
already_hidden error on a delete.  Ignore this since it is harmless.
2019-03-17 14:56:17 +00:00
Nick Craig-Wood
ac7e1dbf62 test_all: add the vfs tests to the integration tests
Fix failing tests for some remotes
2019-03-17 14:56:17 +00:00
Nick Craig-Wood
14ef4437e5 dedupe: fix bug introduced when converting to use walk.ListR #2902
Before the fix we were only de-duping the ListR batches.

Afterwards we dedupe everything.

This will have the consequence that rclone uses more memory as it will
build a map of all the directory names, not just the names in a given
directory.
2019-03-17 11:01:20 +00:00
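
A standalone sketch of the change in behaviour (helper and variable names invented): the duplicate map now spans the entire recursive listing instead of a single ListR batch, which is exactly where the extra memory goes.

    package main

    import (
        "fmt"
        "path"
    )

    // findDupes collects duplicate leaf names across *all* batches of a
    // recursive listing, not within each batch separately.
    func findDupes(batches [][]string) map[string][]string {
        seen := make(map[string][]string) // name -> every path using it
        for _, batch := range batches {   // batches as delivered by ListR
            for _, p := range batch {
                name := path.Base(p)
                seen[name] = append(seen[name], p)
            }
        }
        for name, paths := range seen {
            if len(paths) < 2 {
                delete(seen, name) // keep only real duplicates
            }
        }
        return seen
    }

    func main() {
        batches := [][]string{{"a/dup", "a/x"}, {"b/dup"}}
        fmt.Println(findDupes(batches)) // map[dup:[a/dup b/dup]]
    }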
Danil Semelenov
a0d2ab5b4f cmd: Fix autocompletion of remote paths with spaces - fixes #3047 2019-03-17 10:15:20 +00:00
Nick Craig-Wood
3bfde5f52a ftp: add --ftp-concurrency to limit maximum number of connections
Fixes #2166
2019-03-17 09:57:14 +00:00
Nick Craig-Wood
2b05bd9a08 rc: implement operations/publiclink the equivalent of rclone link
Fixes #3042
2019-03-17 09:41:31 +00:00
Nick Craig-Wood
1318be3b0a vendor: update github.com/goftp/server to fix hang while reading a file from the server
See: https://forum.rclone.org/t/minor-issue-with-linux-ftp-client-and-rclone-ftp-access-denied/8959
2019-03-17 09:30:57 +00:00
Nick Craig-Wood
f4a754a36b drive: add --skip-checksum-gphotos to ignore incorrect checksums on Google Photos
First implementation by @jammin84, re-written by @ncw

Fixes #2207
2019-03-17 09:10:51 +00:00
Nick Craig-Wood
fef73763aa lib/atexit: add SIGTERM to signals which run the exit handlers on unix 2019-03-16 17:47:02 +00:00
Nick Craig-Wood
7267d19ad8 fstest: Use walk.ListR for listing 2019-03-16 17:41:12 +00:00
Nick Craig-Wood
47099466c0 cache: Use walk.ListR for listing the temporary Fs. 2019-03-16 17:41:12 +00:00
Nick Craig-Wood
4376019062 dedupe: Use walk.ListR for listing commands.
This dramatically increases the speed (7x in my tests) of the de-dupe
as google drive supports ListR directly and dedupe did not work with
`--fast-list`.

Fixes #2902
2019-03-16 17:41:12 +00:00
Nick Craig-Wood
e5f4210b09 serve restic: use walk.ListR for listing
This is effectively what the old code did anyway so this should not
make any functional changes.
2019-03-16 17:41:12 +00:00
Nick Craig-Wood
d5f2df2f3d Use walk.ListR for listing operations
This will increase speed for backends which support ListR and will not
have the memory overhead of using --fast-list.

It also means that errors are queued until the end, so that as much of
the remote as possible is listed before an error is returned.

Commands affected are:
- lsf
- ls
- lsl
- lsjson
- lsd
- md5sum/sha1sum/hashsum
- size
- delete
- cat
- settier
2019-03-16 17:41:12 +00:00
Nick Craig-Wood
efd720b533 walk: Implement walk.ListR which will use ListR if at all possible
It otherwise has nearly the same interface as walk.Walk, which it
will fall back to if it can't use ListR.

Using walk.ListR will speed up file system operations by default and
use much less memory and start immediately compared to if --fast-list
had been supplied.
2019-03-16 17:41:12 +00:00
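
A hedged sketch of the dispatch idea, using the walk.Walk and fs.DirEntries signatures that appear in the diffs below (the helper name listAll is invented): prefer the backend's optional ListR feature, otherwise fall back to directory-by-directory walking.

    // listAll streams entries via the backend's ListR if the optional
    // feature is implemented, and falls back to walk.Walk otherwise.
    func listAll(f fs.Fs, dir string, fn fs.ListRCallback) error {
        if doListR := f.Features().ListR; doListR != nil {
            return doListR(dir, fn) // server-side recursion, starts immediately
        }
        return walk.Walk(f, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
            if err != nil {
                return err
            }
            return fn(entries)
        })
    }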
Nick Craig-Wood
047f00a411 filter: Add BoundedRecursion method
This indicates that the filter set could be satisfied by a bounded
directory recursion.
2019-03-16 17:41:12 +00:00
Nick Craig-Wood
bb5ac8efbe http: fix socket leak on 404 errors 2019-03-15 17:04:28 +00:00
Nick Craig-Wood
e62bbf761b http: add --http-no-slash for websites with directories with no slashes #3053
See: https://forum.rclone.org/t/is-there-a-way-to-log-into-an-htpp-server/8484
2019-03-15 17:04:06 +00:00
Nick Craig-Wood
54a2e99d97 http: remove duplicates from listings 2019-03-15 16:59:36 +00:00
Nick Craig-Wood
28230d93b4 sync: Implement --suffix-keep-extension for use with --suffix - fixes #3032 2019-03-15 14:21:39 +00:00
Florian Gamböck
3c4407442d cmd: fix completion of remotes
The previous behavior of the remotes completion was that only
alphanumeric characters were allowed in a remote name. This limitation
has been lifted somewhat by #2985, which also allowed an underscore.

With the new implementation introduced in this commit, the completion of
the remote name has been simplified: If there is no colon (":") in the
current word, then complete the remote name. Otherwise, complete the path
inside the specified remote. This allows correct completion of all
remote names that are allowed by the config (including - and _).
Actually it matches much more than that, even remote names that are not
allowed by the config, but in such a case there already would be a wrong
identifier in the configuration file.

With this simpler string comparison, we can get rid of the regular
expression, which makes the completion multiple times faster. For a
sample benchmark, try the following:

     # Old way
     $ time bash -c 'for _ in {1..1000000}; do
         [[ remote:path =~ ^[[:alnum:]]*$ ]]; done'

     real    0m15,637s
     user    0m15,613s
     sys     0m0,024s

     # New way
     $ time bash -c 'for _ in {1..1000000}; do
         [[ remote:path != *:* ]]; done'

     real    0m1,324s
     user    0m1,304s
     sys     0m0,020s
2019-03-15 13:16:42 +00:00
Dan Walters
caf318d499 dlna: add connection manager service description
The UPnP MediaServer spec says that the ConnectionManager service is
required, and adding it was enough to get dlna support working on my
other TV (LG webOS 2.2.1).
2019-03-15 13:14:31 +00:00
Nick Craig-Wood
2fbb504b66 webdav: fix About/df when reading the available/total returns 0
Some WebDAV servers return an empty Available and Used which parses as 0.

This caused About to return the Total as 0 which can confuse mounted
file systems.

After this change we ignore the result if Available and Used are both 0.

See: https://forum.rclone.org/t/windows-mounted-webdav-drive-has-no-free-space/8938
2019-03-15 12:03:04 +00:00
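
The heart of the fix as a tiny sketch (the function is invented for illustration): zeros for both values are treated as "quota not reported" rather than as a real total of 0.

    // totalFromDav turns the DAV quota properties into an optional total:
    // available and used both 0 almost certainly means "not reported",
    // so return nil (unknown) instead of a misleading total of 0.
    func totalFromDav(available, used int64) *int64 {
        if available == 0 && used == 0 {
            return nil
        }
        total := available + used
        return &total
    }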
Alex Chen
2b58d1a46f docs: onedrive: Add guide to refreshing token after MFA is enabled 2019-03-14 00:21:05 +08:00
Cnly
1582a21408 onedrive: Always add trailing colon to path when addressing items - #2720, #3039 2019-03-13 11:30:15 +08:00
Nick Craig-Wood
229898dcee Add Dan Walters to contributors 2019-03-11 17:31:46 +00:00
Dan Walters
95194adfd5 dlna: fix root XML service descriptor
The SCPD URL was being set after marshalling the XML, and thus coming
out blank.  Now works on my Samsung TV, and likely fixes some issues
reported by others in #2648.
2019-03-11 17:31:32 +00:00
Nick Craig-Wood
4827496234 webdav: fix race when creating directories - fixes #3035
Before this change a race condition existed in mkdir
- the directory was attempted to be created
- the parent didn't exist so it failed
- the parent was created
- the directory was created again

The last step failed as the directory was created in a different thread.

This was fixed by checking the error messages of MKCOL for both
directory creations, rather than only the first.
2019-03-11 16:20:05 +00:00
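
A sketch of the race-tolerant pattern (mkcol and alreadyExists are stand-ins for the WebDAV call and its error classification, not rclone's actual code): "already exists" is accepted as success on every creation attempt, including the retry.

    // mkdirTolerant creates dir, creating parent first if needed; a
    // directory that appears concurrently is never treated as an error.
    func mkdirTolerant(mkcol func(string) error, alreadyExists func(error) bool, dir, parent string) error {
        err := mkcol(dir)
        if err == nil || alreadyExists(err) {
            return nil
        }
        // Assume the failure was a missing parent: create it, then retry.
        if perr := mkcol(parent); perr != nil && !alreadyExists(perr) {
            return perr
        }
        err = mkcol(dir)
        if err == nil || alreadyExists(err) { // checked on the retry too
            return nil
        }
        return err
    }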
Nick Craig-Wood
415eeca6cf drive: fix range requests on 0 length files
Before this change a range request on a 0 length file would fail

    $ rclone cat --head 128 drive:test/emptyfile
    ERROR : open file failed: googleapi: Error 416: Request range not satisfiable, requestedRangeNotSatisfiable

To fix this we remove Range: headers on requests for zero length files.
2019-03-10 15:47:34 +00:00
Nick Craig-Wood
58d9a3e1b5 filter: reload filter when the options are set via the rc - fixes #3018 2019-03-10 13:09:44 +00:00
Nick Craig-Wood
cccadfa7ae rc: add ability for options blocks to register reload functions 2019-03-10 13:09:44 +00:00
ishuah
1b52f8d2a5 copy/sync/move: add --create-empty-src-dirs flag - fixes #2869 2019-03-10 11:56:38 +00:00
Nick Craig-Wood
2078ad68a5 gcs: Allow bucket policy only buckets - fixes #3014
This introduces a new config variable bucket_policy_only.  If this is
set then rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
2019-03-10 11:45:42 +00:00
Nick Craig-Wood
368ed9e67d docs: add a FAQ entry about --max-backlog 2019-03-09 16:19:24 +00:00
Nick Craig-Wood
7c30993bb7 Add Fionera to contributors 2019-03-09 16:19:24 +00:00
Fionera
55b9a4ed30 Add ServerSideAcrossConfig Flag and check for it. fixes #2728 2019-03-09 16:18:45 +00:00
jaKa
118a8b949e koofr: implemented a backend for Koofr cloud storage service.
Implemented a Koofr REST API backend.
Added said backend to tests.
Added documentation for said backend.
2019-03-06 13:41:43 +00:00
jaKa
1d14e30383 vendor: add github.com/koofr/go-koofrclient
* added koofr client SDK dep for koofr backend
2019-03-06 13:41:43 +00:00
Nick Craig-Wood
27714e29c3 s3: note incompatibility with CEPH Jewel - fixes #3015 2019-03-06 11:50:37 +00:00
Nick Craig-Wood
9f8e1a1dc5 drive: fix imports of text files
Before this change text file imports were ignored.  This was because
the mime type wasn't matched.

Fix this by adjusting the keys in the mime type maps as well as the
values.

See: https://forum.rclone.org/t/how-to-upload-text-files-to-google-drive-as-google-docs/9014
2019-03-05 17:20:31 +00:00
Nick Craig-Wood
1692c6bd0a vfs: shorten the locking window for vfs/refresh
Before this change we locked the root directory, recursively fetched
the listing, applied it then unlocked the root directory.

After this change we recursively fetch the listing then apply it with
the root directory locked which shortens the time that the root
directory is locked greatly.

With both the original method and the new one the subdirectories are
left unlocked and so could potentially be changed, leading to
inconsistencies.  This change makes the potential for inconsistencies
slightly worse by leaving the root directory unlocked, in exchange for
a much more responsive system while running vfs/refresh.

See: https://forum.rclone.org/t/rclone-rc-vfs-refresh-locking-directory-being-refreshed/9004
2019-03-05 14:17:42 +00:00
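
The reordering in outline, as a hedged sketch (Dir, fetchRecursive and apply are stand-ins for the VFS internals): the slow network fetch happens before the lock is taken, so the root is locked only for the in-memory apply.

    // refresh re-reads dir while holding the root lock only for the
    // final apply step instead of for the whole recursive fetch.
    func refresh(root *Dir, dir string) {
        listing := fetchRecursive(dir) // slow: remote listing, lock not held
        root.mu.Lock()
        defer root.mu.Unlock()
        apply(root, listing) // fast: swap the new entries in
    }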
Nick Craig-Wood
d233efbf63 Add marcintustin to contributors 2019-03-01 17:10:26 +00:00
marcintustin
e9a45a5a34 googlecloudstorage: fall back to default application credentials
Fall back to default application credentials when all other credential sources fail

This change allows users with default application credentials
configured (notably when running on google compute instances) to
dispense with explicitly configuring google cloud storage credentials
in rclone's own configuration.
2019-03-01 18:05:31 +01:00
Nick Craig-Wood
f6eb5c6983 lib/pacer: fix test on macOS 2019-03-01 12:27:33 +00:00
Nick Craig-Wood
2bf19787d5 Add Dr.Rx to contributors 2019-03-01 12:25:16 +00:00
Dr.Rx
0ea3a57ecb azureblob: Enable MD5 checksums when uploading files bigger than the "Cutoff"
This enables MD5 checksum calculation and publication when uploading files above the "Cutoff" limit.
It was explicitly ignored in the case of multi-block (a.k.a. multipart) uploads to Azure Blob Storage.
2019-03-01 11:12:23 +01:00
Nick Craig-Wood
b353c730d8 vfs: make tests work on remotes which don't support About 2019-02-28 14:05:21 +00:00
Nick Craig-Wood
173dfbd051 vfs: read directory and check for a file before mkdir
Before this change when doing Mkdir the VFS layer could add the new
item to an unread directory which caused confusion.

It could also do mkdir on a file when run on a bucket based remote
which would temporarily overwrite the file with a directory.

Fixes #2993
2019-02-28 14:05:17 +00:00
Nick Craig-Wood
e3bceb9083 operations: fix Overlapping test for Windows native paths 2019-02-28 11:39:32 +00:00
Nick Craig-Wood
52c6b373cc Add calisro to contributors 2019-02-28 10:20:35 +00:00
calisro
0bc0f62277 Recommendation for creating own client ID 2019-02-28 11:20:08 +01:00
Cnly
12c8ee4b4b atexit: allow functions to be unregistered 2019-02-27 23:37:24 +01:00
Nick Craig-Wood
5240f9d1e5 sync: fix integration tests to check correct error 2019-02-27 22:05:16 +00:00
Nick Craig-Wood
997654d77d ncdu: fix display corruption with Chinese characters - #2989 2019-02-27 09:55:28 +00:00
Nick Craig-Wood
f1809451f6 docs: add more examples of config-less usage 2019-02-27 09:41:40 +00:00
Nick Craig-Wood
84c650818e sync: don't allow syncs on overlapping remotes - fixes #2932 2019-02-26 19:25:52 +00:00
Nick Craig-Wood
c5775cf73d fserrors: don't panic on uncomparable errors 2019-02-26 15:39:16 +00:00
Nick Craig-Wood
dca482e058 Add Alexandru Bumbacea to contributors 2019-02-26 15:39:16 +00:00
Nick Craig-Wood
6943169cef Add Six to contributors 2019-02-26 15:38:25 +00:00
Alexandru Bumbacea
4fddec113c sftp: allow custom ssh client config 2019-02-26 16:37:54 +01:00
Six
2114fd8f26 cmd: Fix tab-completion for remotes with underscores in their names 2019-02-26 16:25:45 +01:00
Nick Craig-Wood
63bb6de491 build: update to use go1.12 for the build 2019-02-26 13:18:31 +00:00
Nick Craig-Wood
0a56a168ff bin/get-github-release.go: scrape the downloads page to avoid the API limit
This should fix pull request build failures, which can't use the
github token.
2019-02-25 21:34:59 +00:00
Nick Craig-Wood
88e22087a8 Add Nestar47 to contributors 2019-02-25 21:34:59 +00:00
Nestar47
9404ed703a drive: add docs on team drives and --fast-list eventual consistency 2019-02-25 21:46:27 +01:00
Nick Craig-Wood
c7ecccd5ca mount: remove an obsolete EXPERIMENTAL tag from the docs 2019-02-25 17:53:53 +00:00
Sebastian Bünger
972e27a861 jottacloud: fix token refresh - fixes #2992 2019-02-21 19:26:18 +01:00
Fabian Möller
8f4ea77c07 fs: remove unnecessary pacer warning 2019-02-18 08:42:36 +01:00
Fabian Möller
61616ba864 pacer: make pacer more flexible
Make the pacer package more flexible by extracting the pace calculation
functions into a separate interface. This also makes it possible to move
features that require the fs package, like logging and custom errors,
into the fs package.
package.

Also add a RetryAfterError sentinel error that can be used to signal a
desired retry time to the Calculator.
2019-02-16 14:38:07 +00:00
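
A usage sketch mirroring the b2 and dropbox changes shown in the diffs below (the surrounding function is simplified): when the server names a Retry-After delay, the error is wrapped so the Calculator waits that long instead of applying its normal backoff.

    func shouldRetry(resp *http.Response, err error) (bool, error) {
        if resp != nil {
            if ra := resp.Header.Get("Retry-After"); ra != "" {
                if secs, perr := strconv.Atoi(ra); perr == nil {
                    // Tell the pacer exactly how long to sleep.
                    return true, pacer.RetryAfterError(err, time.Duration(secs)*time.Second)
                }
            }
        }
        return fserrors.ShouldRetry(err), err
    }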
Fabian Möller
9ed721a3f6 errors: add lib/errors package 2019-02-16 14:38:07 +00:00
Nick Craig-Wood
0b9d7fec0c lsf: add 'e' format to show encrypted names and 'o' for original IDs
This brings it up to par with lsjson.

This commit also reworks the framework to use ListJSON internally
which removes duplicated code and makes testing easier.
2019-02-14 14:45:35 +00:00
Nick Craig-Wood
240c15883f accounting: fix total ETA when --stats-unit bits is in effect 2019-02-14 07:56:52 +00:00
Nick Craig-Wood
38864adc9c cmd: Use private custom func to fix clash between rclone and kubectl
Before this change, rclone used the `__custom_func` hook to control
the completions of remote files.  However this clashes with other
cobra users, the most notable example being kubectl.

Upgrading cobra to master allows us to use a namespaced function
`__rclone_custom_func` which fixes the problem.

Fixes #1529
2019-02-13 23:02:22 +00:00
Nick Craig-Wood
5991315990 vendor: update github.com/spf13/cobra to master 2019-02-13 23:02:22 +00:00
Nick Craig-Wood
73f0a67d98 s3: Update Dreamhost endpoint - fixes #2974 2019-02-13 21:10:43 +00:00
Nick Craig-Wood
ffe067d6e7 azureblob: fix SAS URL support - fixes #2969
This was broken accidentally in 5d1d93e163 as part of #2654
2019-02-13 17:36:14 +00:00
Nick Craig-Wood
b5f563fb0f vfs: Ignore Truncate if called with no readers and already the correct size
This fixes FreeBSD which seems to call SetAttr with a size even on
read only files.

This is probably a bug in the FreeBSD FUSE implementation as it
happens with mount and cmount.

See: https://forum.rclone.org/t/freebsd-question/8662/12
2019-02-12 17:27:04 +00:00
Nick Craig-Wood
9310c7f3e2 build: update to use go1.12rc1 for the build 2019-02-12 16:23:08 +00:00
Nick Craig-Wood
1c1a8ef24b webdav: allow IsCollection property to be integer or boolean - fixes #2964
It turns out that some servers emit "true" or "false" rather than "1"
or "0" for this property, so adapt accordingly.
2019-02-12 12:33:08 +00:00
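
The accepted forms as a small sketch (the parser below is illustrative, not rclone's actual property decoder):

    // parseIsCollection accepts the integer and boolean spellings that
    // different WebDAV servers emit for the iscollection property.
    func parseIsCollection(s string) (bool, error) {
        switch strings.ToLower(strings.TrimSpace(s)) {
        case "1", "true":
            return true, nil
        case "0", "false":
            return false, nil
        }
        return false, fmt.Errorf("unexpected iscollection value %q", s)
    }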
Nick Craig-Wood
2cfbc2852d docs: move --no-traverse docs to the correct section 2019-02-12 12:26:19 +00:00
Nick Craig-Wood
b167d30420 Add client side TLS/SSL flags --ca-cert/--client-cert/--client-key
Fixes #2966
2019-02-12 12:26:19 +00:00
Nick Craig-Wood
ec59760d9c pcloud: remove duplicated UserInfo.Result field spotted by go vet 2019-02-12 11:53:26 +00:00
Nick Craig-Wood
076d3da825 operations: resume downloads if the reader fails in copy - fixes #2108
This puts a shim on the reader opened by Copy so that if an error is
returned, the reader is re-opened at the correct seek point.

This should make downloading very large files more reliable.
2019-02-12 11:47:57 +00:00
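
A sketch of the shim (names invented; the open callback stands in for rclone's object re-open logic): on a mid-transfer read error, reopen the source at the current offset and carry on, up to a retry limit.

    const maxRetries = 10

    type resumingReader struct {
        open    func(offset int64) (io.ReadCloser, error)
        rc      io.ReadCloser
        offset  int64
        retries int
    }

    func (r *resumingReader) Read(p []byte) (int, error) {
        for {
            n, err := r.rc.Read(p)
            r.offset += int64(n)
            if err == nil || err == io.EOF || r.retries >= maxRetries {
                return n, err
            }
            // Transport error: reopen at the current seek point and retry.
            r.retries++
            _ = r.rc.Close()
            rc, openErr := r.open(r.offset)
            if openErr != nil {
                return n, err // reopen failed - surface the read error
            }
            r.rc = rc
            if n > 0 {
                return n, nil // hand back what we have; next Read continues
            }
        }
    }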
Nick Craig-Wood
c3eecbe933 dropbox: retry blank errors to fix long listings
Sometimes dropbox returns blank errors in listings - retry this

See: https://forum.rclone.org/t/bug-sync-dropbox-to-gdrive-failing-for-large-files-50gb-error-unexpected-eof/8595
2019-02-10 20:55:16 +00:00
Nick Craig-Wood
d8e5b19ed4 build: switch to semver compliant version tags
Fixes #2960
2019-02-10 20:55:16 +00:00
Nick Craig-Wood
43bc381e90 vendor: update all dependencies 2019-02-10 20:55:16 +00:00
Nick Craig-Wood
fb5ee22112 Add Vince to contributors 2019-02-10 20:55:16 +00:00
Vince
35327dad6f b2: allow manual configuration of backblaze downloadUrl - fixes #2808 2019-02-10 20:54:10 +00:00
Fabian Möller
ef5e1909a0 encoder: add lib/encoder to handle character substitution and quoting
Fabian Möller
bca5d8009e onedrive: return errors instead of panic for invalid uploads 2019-02-09 18:23:47 +00:00
Fabian Möller
334f19c974 info: improve allowed character testing 2019-02-09 18:23:47 +00:00
Fabian Möller
42a5bf1d9f golangci: enable lints excluded by default 2019-02-09 18:18:22 +00:00
Nick Craig-Wood
71d1890316 build: ignore testbuilds when uploading to github 2019-02-09 12:22:06 +00:00
Nick Craig-Wood
d29c545627 Start v1.46-DEV development 2019-02-09 12:21:57 +00:00
476 changed files with 47363 additions and 11175 deletions

View File

@@ -20,6 +20,9 @@ linters:
   disable-all: true
 
 issues:
+  # Enable some lints excluded by default
+  exclude-use-default: false
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-per-linter: 0

View File

@@ -1,26 +1,33 @@
+---
 language: go
 sudo: required
 dist: trusty
 os:
 - linux
-go:
-- 1.8.x
-- 1.9.x
-- 1.10.x
-- 1.11.x
-- tip
 go_import_path: github.com/ncw/rclone
 before_install:
-- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
-- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
+- git fetch --unshallow --tags
+- |
+  if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+    sudo modprobe fuse
+    sudo chmod 666 /dev/fuse
+    sudo chown root:$USER /etc/fuse.conf
+  fi
+  if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+    brew update
+    brew tap caskroom/cask
+    brew cask install osxfuse
+  fi
+  if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
+    choco install -y winfsp zip make
+    cd ../.. # fix crlf in git checkout
+    mv $TRAVIS_REPO_SLUG _old
+    git config --global core.autocrlf false
+    git clone _old $TRAVIS_REPO_SLUG
+    cd $TRAVIS_REPO_SLUG
+  fi
 install:
-- git fetch --unshallow --tags
 - make vars
-- make build_dep
-script:
-- make check
-- make quicktest
-- make compile_all
 env:
   global:
   - GOTAGS=cmount
@@ -31,23 +38,66 @@ env:
 addons:
   apt:
     packages:
     - fuse
     - libfuse-dev
     - rpm
     - pkg-config
 cache:
   directories:
   - $HOME/.cache/go-build
 matrix:
   allow_failures:
   - go: tip
   include:
-  - os: osx
-    go: 1.11.x
-    env: GOTAGS=""
-    cache:
-      directories:
-      - $HOME/Library/Caches/go-build
+  - go: 1.8.x
+    script:
+    - make quicktest
+  - go: 1.9.x
+    script:
+    - make quicktest
+  - go: 1.10.x
+    script:
+    - make quicktest
+  - go: 1.11.x
+    script:
+    - make quicktest
+  - go: 1.12.x
+    env:
+    - GOTAGS=cmount
+    script:
+    - make build_dep
+    - make check
+    - make quicktest
+    - make racequicktest
+    - make compile_all
+  - os: osx
+    go: 1.12.x
+    env:
+    - GOTAGS= # cmount doesn't work on osx travis for some reason
+    cache:
+      directories:
+      - $HOME/Library/Caches/go-build
+    script:
+    - make
+    - make quicktest
+    - make racequicktest
+# - os: windows
+#   go: 1.12.x
+#   env:
+#   - GOTAGS=cmount
+#   - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
+#   #filter_secrets: false # works around a problem with secrets under windows
+#   cache:
+#     directories:
+#     - ${LocalAppData}/go-build
+#   script:
+#   - make
+#   - make quicktest
+#   - make racequicktest
+  - go: tip
+    script:
+    - make quicktest
 deploy:
   provider: script
   script: make travis_beta
@@ -55,5 +105,5 @@ deploy:
   on:
     repo: ncw/rclone
     all_branches: true
-    go: 1.11.x
-    condition: $TRAVIS_PULL_REQUEST == false
+    go: 1.12.x
+    condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"

View File

@@ -11,14 +11,12 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 BRANCH_PATH :=
 endif
 TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
 ifneq ($(TAG),$(LAST_TAG))
 	TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-# Run full tests if go >= go1.11
-FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
 BETA_PATH := $(BRANCH_PATH)$(TAG)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
@@ -42,7 +40,6 @@ vars:
 	@echo LAST_TAG="'$(LAST_TAG)'"
 	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
-	@echo FULL_TESTS="'$(FULL_TESTS)'"
 	@echo BETA_URL="'$(BETA_URL)'"
 
 version:
@@ -57,28 +54,22 @@ test: rclone
 # Quick test
 quicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
-ifdef FULL_TESTS
+
+racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
-endif
 
 # Do source code quality checks
 check: rclone
-ifdef FULL_TESTS
 	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
 	@# see: https://github.com/golangci/golangci-lint/issues/204
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
 	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
 	@golangci-lint run ./...
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"
-else
-	@echo Skipping source quality tests as version of go too old
-endif
 
 # Get the build dependencies
 build_dep:
-ifdef FULL_TESTS
 	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
-endif
 
 # Get the release dependencies
 release_dep:
@@ -162,11 +153,7 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-ifdef FULL_TESTS
 	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
-else
-	@echo Skipping compile all as version of go too old
-endif
 
 appveyor_upload:
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -186,6 +173,11 @@ BUILD_FLAGS := -exclude "^(windows|darwin)/"
 ifeq ($(TRAVIS_OS_NAME),osx)
 	BUILD_FLAGS := -include "^darwin/" -cgo
 endif
+ifeq ($(TRAVIS_OS_NAME),windows)
+	# BUILD_FLAGS := -include "^windows/" -cgo
+	# 386 doesn't build yet
+	BUILD_FLAGS := -include "^windows/amd64" -cgo
+endif
 
 travis_beta:
 ifeq ($(TRAVIS_OS_NAME),linux)

View File

@@ -36,6 +36,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
   * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+  * Koofr [:page_facing_up:](https://rclone.org/koofr/)
   * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
   * Mega [:page_facing_up:](https://rclone.org/mega/)
   * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)

View File

@@ -16,6 +16,7 @@ import (
 	_ "github.com/ncw/rclone/backend/http"
 	_ "github.com/ncw/rclone/backend/hubic"
 	_ "github.com/ncw/rclone/backend/jottacloud"
+	_ "github.com/ncw/rclone/backend/koofr"
 	_ "github.com/ncw/rclone/backend/local"
 	_ "github.com/ncw/rclone/backend/mega"
 	_ "github.com/ncw/rclone/backend/onedrive"

View File

@@ -155,7 +155,7 @@ type Fs struct {
 	noAuthClient *http.Client       // unauthenticated http client
 	root         string             // the path we are working on
 	dirCache     *dircache.DirCache // Map of directory path to directory id
-	pacer        *pacer.Pacer       // pacer for API calls
+	pacer        *fs.Pacer          // pacer for API calls
 	trueRootID   string             // ID of true root directory
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
 }
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root:         root,
 		opt:          *opt,
 		c:            c,
-		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
+		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
 		noAuthClient: fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{

View File

@@ -144,7 +144,7 @@ type Fs struct {
 	containerOKMu    sync.Mutex            // mutex to protect container OK
 	containerOK      bool                  // true if we have created the container
 	containerDeleted bool                  // true if we have deleted the container
-	pacer            *pacer.Pacer          // To pace and retry the API calls
+	pacer            *fs.Pacer             // To pace and retry the API calls
 	uploadToken      *pacer.TokenDispenser // control concurrency
 }
@@ -347,7 +347,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		opt:         *opt,
 		container:   container,
 		root:        directory,
-		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
+		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 		client:      fshttp.NewClient(fs.Config),
 	}
@@ -392,6 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			return nil, errors.New("Container name in SAS URL and container provided in command do not match")
 		}
+		f.container = parts.ContainerName
 		containerURL = azblob.NewContainerURL(*u, pipeline)
 	} else {
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -1385,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	blob := o.getBlobReference()
 	httpHeaders := azblob.BlobHTTPHeaders{}
 	httpHeaders.ContentType = fs.MimeType(o)
-	// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
-	// MD5 only for PutBlob requests
-	if size < int64(o.fs.opt.UploadCutoff) {
-		if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
-			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-			if err == nil {
-				httpHeaders.ContentMD5 = sourceMD5bytes
-			} else {
-				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-			}
-		}
-	}
+	// Compute the Content-MD5 of the file, for multiparts uploads it
+	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
+	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
+	// in order to validate its integrity during transport
+	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
+		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+		if err == nil {
+			httpHeaders.ContentMD5 = sourceMD5bytes
+		} else {
+			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+		}
+	}

View File

@@ -125,6 +125,14 @@ minimum size.`,
 			Help:     `Disable checksums for large (> upload cutoff) files`,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name: "download_url",
+			Help: `Custom endpoint for downloads.
+
+This is usually set to a Cloudflare CDN URL as Backblaze offers
+free egress for data downloaded through the Cloudflare network.
+Leave blank if you want to use the endpoint provided by Backblaze.`,
+			Advanced: true,
 		}},
 	})
 }
@@ -140,6 +148,7 @@ type Options struct {
 	UploadCutoff    fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize       fs.SizeSuffix `config:"chunk_size"`
 	DisableCheckSum bool          `config:"disable_checksum"`
+	DownloadURL     string        `config:"download_url"`
 }
 
 // Fs represents a remote b2 server
@@ -158,7 +167,7 @@ type Fs struct {
 	uploadMu     sync.Mutex                  // lock for upload variable
 	uploads      []*api.GetUploadURLResponse // result of get upload URL calls
 	authMu       sync.Mutex                  // lock for authorizing the account
-	pacer        *pacer.Pacer                // To pace and retry the API calls
+	pacer        *fs.Pacer                   // To pace and retry the API calls
 	bufferTokens chan []byte                 // control concurrency of multipart uploads
 }
@@ -242,13 +251,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
 				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
 			}
 		}
-		retryAfterDuration := time.Duration(retryAfter) * time.Second
-		if f.pacer.GetSleep() < retryAfterDuration {
-			fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
-			// We set 1/2 the value here because the pacer will double it immediately
-			f.pacer.SetSleep(retryAfterDuration / 2)
-		}
-		return true, err
+		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
 	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
@@ -354,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		bucket: bucket,
 		root:   directory,
 		srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer:  pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -949,6 +952,13 @@ func (f *Fs) hide(Name string) error {
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
+		if apiErr, ok := err.(*api.Error); ok {
+			if apiErr.Code == "already_hidden" {
+				// sometimes eventual consistency causes this, so
+				// ignore this error since it is harmless
+				return nil
+			}
+		}
 		return errors.Wrapf(err, "failed to hide %q", Name)
 	}
 	return nil
@@ -1296,9 +1306,17 @@ var _ io.ReadCloser = &openFile{}
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	opts := rest.Opts{
 		Method:  "GET",
-		RootURL: o.fs.info.DownloadURL,
 		Options: options,
 	}
+	// Use downloadUrl from backblaze if downloadUrl is not set
+	// otherwise use the custom downloadUrl
+	if o.fs.opt.DownloadURL == "" {
+		opts.RootURL = o.fs.info.DownloadURL
+	} else {
+		opts.RootURL = o.fs.opt.DownloadURL
+	}
 	// Download by id if set otherwise by name
 	if o.id != "" {
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)

View File

@@ -111,7 +111,7 @@ type Fs struct {
 	features     *fs.Features          // optional features
 	srv          *rest.Client          // the connection to the one drive server
 	dirCache     *dircache.DirCache    // Map of directory path to directory id
-	pacer        *pacer.Pacer          // pacer for API calls
+	pacer        *fs.Pacer             // pacer for API calls
 	tokenRenewer *oauthutil.Renew      // renew the token on expiry
 	uploadToken  *pacer.TokenDispenser // control concurrency
 }
@@ -260,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
-		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 	}
 	f.features = (&fs.Features{

View File

@@ -1191,7 +1191,7 @@ func (f *Fs) Rmdir(dir string) error {
 	}
 
 	var queuedEntries []*Object
-	err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
+	err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 		for _, o := range entries {
 			if oo, ok := o.(fs.Object); ok {
 				co := ObjectFromOriginal(f, oo)
@@ -1287,7 +1287,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	}
 
 	var queuedEntries []*Object
-	err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
+	err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 		for _, o := range entries {
 			if oo, ok := o.(fs.Object); ok {
 				co := ObjectFromOriginal(f, oo)

View File

@@ -1023,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 	}
 
 	var queuedEntries []fs.Object
-	err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
+	err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 		for _, o := range entries {
 			if oo, ok := o.(fs.Object); ok {
 				queuedEntries = append(queuedEntries, oo)

View File

@@ -186,10 +186,10 @@ func init() {
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Google Application Client Id\nLeave blank normally.",
+			Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Google Application Client Secret\nLeave blank normally.",
+			Help: "Google Application Client Secret\nSetting your own is recommended.",
 		}, {
 			Name: "scope",
 			Help: "Scope that rclone should use when requesting access from drive.",
@@ -240,6 +240,22 @@ func init() {
 			Default:  false,
 			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
 			Advanced: true,
+		}, {
+			Name:    "skip_checksum_gphotos",
+			Default: false,
+			Help: `Skip MD5 checksum on Google photos and videos only.
+
+Use this if you get checksum errors when transferring Google photos or
+videos.
+
+Setting this flag will cause Google photos and videos to return a
+blank MD5 checksum.
+
+Google photos are identifed by being in the "photos" space.
+
+Corrupted checksums are caused by Google modifying the image/video but
+not updating the checksum.`,
+			Advanced: true,
 		}, {
 			Name:    "shared_with_me",
 			Default: false,
@@ -396,6 +412,7 @@ type Options struct {
 	AuthOwnerOnly       bool          `config:"auth_owner_only"`
 	UseTrash            bool          `config:"use_trash"`
 	SkipGdocs           bool          `config:"skip_gdocs"`
+	SkipChecksumGphotos bool          `config:"skip_checksum_gphotos"`
 	SharedWithMe        bool          `config:"shared_with_me"`
 	TrashedOnly         bool          `config:"trashed_only"`
 	Extensions          string        `config:"formats"`
@@ -426,7 +443,7 @@ type Fs struct {
 	client           *http.Client       // authorized client
 	rootFolderID     string             // the id of the root folder
 	dirCache         *dircache.DirCache // Map of directory path to directory id
-	pacer            *pacer.Pacer       // To pace the API calls
+	pacer            *fs.Pacer          // To pace the API calls
 	exportExtensions []string           // preferred extensions to download docs
 	importMimeTypes  []string           // MIME types to convert to docs
 	isTeamDrive      bool               // true if this is a team drive
@@ -615,6 +632,9 @@ func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, inc
 	if f.opt.AuthOwnerOnly {
 		fields += ",owners"
 	}
+	if f.opt.SkipChecksumGphotos {
+		fields += ",spaces"
+	}
 	fields = fmt.Sprintf("files(%s),nextPageToken", fields)
@@ -676,28 +696,33 @@ func isPowerOfTwo(x int64) bool {
 }
 
 // add a charset parameter to all text/* MIME types
-func fixMimeType(mimeType string) string {
-	mediaType, param, err := mime.ParseMediaType(mimeType)
+func fixMimeType(mimeTypeIn string) string {
+	if mimeTypeIn == "" {
+		return ""
+	}
+	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
 	if err != nil {
-		return mimeType
+		return mimeTypeIn
 	}
-	if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
+	mimeTypeOut := mimeTypeIn
+	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
 		param["charset"] = "utf-8"
-		mimeType = mime.FormatMediaType(mediaType, param)
+		mimeTypeOut = mime.FormatMediaType(mediaType, param)
 	}
-	return mimeType
+	if mimeTypeOut == "" {
+		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+	}
+	return mimeTypeOut
 }
 
-func fixMimeTypeMap(m map[string][]string) map[string][]string {
-	for _, v := range m {
+func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
+	out = make(map[string][]string, len(in))
+	for k, v := range in {
 		for i, mt := range v {
-			fixed := fixMimeType(mt)
-			if fixed == "" {
-				panic(errors.Errorf("unable to fix MIME type %q", mt))
-			}
-			v[i] = fixed
+			v[i] = fixMimeType(mt)
 		}
+		out[fixMimeType(k)] = v
 	}
-	return m
+	return out
 }
 
 func isInternalMimeType(mimeType string) bool {
 	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
@@ -789,8 +814,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
 }
 
 // newPacer makes a pacer configured for drive
-func newPacer(opt *Options) *pacer.Pacer {
-	return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
+func newPacer(opt *Options) *fs.Pacer {
+	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
 }
 
 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
@@ -902,6 +927,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 		ReadMimeType:            true,
 		WriteMimeType:           true,
 		CanHaveEmptyDirectories: true,
+		ServerSideAcrossConfigs: true,
 	}).Fill(f)
 
 	// Create a new authorized Drive client.
@@ -996,6 +1022,15 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
 
 // newRegularObject creates a fs.Object for a normal drive.File
 func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
+	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
+	if f.opt.SkipChecksumGphotos {
+		for _, space := range info.Spaces {
+			if space == "photos" {
+				info.Md5Checksum = ""
+				break
+			}
+		}
+	}
 	return &Object{
 		baseObject: f.newBaseObject(remote, info),
 		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
@@ -2430,6 +2465,10 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
 		return req, nil, err
 	}
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
+	if o.bytes == 0 {
+		// Don't supply range requests for 0 length objects as they always fail
+		delete(req.Header, "Range")
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		res, err = o.fs.client.Do(req)
 		if err == nil {

View File

@@ -160,7 +160,7 @@ type Fs struct {
 	team           team.Client  // for the Teams API
 	slashRoot      string       // root with "/" prefix, lowercase
 	slashRootSlash string       // root with "/" prefix and postfix, lowercase
-	pacer          *pacer.Pacer // To pace the API calls
+	pacer          *fs.Pacer    // To pace the API calls
 	ns             string       // The namespace we are using or "" for none
 }
@@ -209,12 +209,12 @@ func shouldRetry(err error) (bool, error) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
 			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
-			time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
+			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
 	// Keep old behavior for backward compatibility
-	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
+	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
 		return true, err
 	}
 	return fserrors.ShouldRetry(err), err
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
-		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	config := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 )
@@ -45,6 +46,11 @@ func init() {
 			Help:       "FTP password",
 			IsPassword: true,
 			Required:   true,
+		}, {
+			Name:     "concurrency",
+			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
+			Default:  0,
+			Advanced: true,
 		}},
 	})
@@ -52,10 +58,11 @@ func init() {
 
 // Options defines the configuration for this backend
 type Options struct {
-	Host string `config:"host"`
-	User string `config:"user"`
-	Pass string `config:"pass"`
-	Port string `config:"port"`
+	Host        string `config:"host"`
+	User        string `config:"user"`
+	Pass        string `config:"pass"`
+	Port        string `config:"port"`
+	Concurrency int    `config:"concurrency"`
 }
 
 // Fs represents a remote FTP server
@@ -70,6 +77,7 @@ type Fs struct {
 	dialAddr string
 	poolMu   sync.Mutex
 	pool     []*ftp.ServerConn
+	tokens   *pacer.TokenDispenser
 }
 
 // Object describes an FTP file
@@ -128,6 +136,9 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 
 // Get an FTP connection from the pool, or open a new one
 func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
+	if f.opt.Concurrency > 0 {
+		f.tokens.Get()
+	}
 	f.poolMu.Lock()
 	if len(f.pool) > 0 {
 		c = f.pool[0]
@@ -147,6 +158,9 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 // if err is not nil then it checks the connection is alive using a
 // NOOP request
 func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
+	if f.opt.Concurrency > 0 {
+		defer f.tokens.Put()
+	}
 	c := *pc
 	*pc = nil
 	if err != nil {
@@ -198,6 +212,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		user:     user,
 		pass:     pass,
 		dialAddr: dialAddr,
+		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,

View File

@@ -16,6 +16,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/ */
import ( import (
"context"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
@@ -45,6 +46,8 @@ import (
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1" storage "google.golang.org/api/storage/v1"
) )
@@ -144,6 +147,22 @@ func init() {
Value: "publicReadWrite", Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.", Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}}, }},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, { }, {
Name: "location", Name: "location",
Help: "Location for the newly created buckets.", Help: "Location for the newly created buckets.",
@@ -241,6 +260,7 @@ type Options struct {
ServiceAccountCredentials string `config:"service_account_credentials"` ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"` ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"` BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"` Location string `config:"location"`
StorageClass string `config:"storage_class"` StorageClass string `config:"storage_class"`
} }
@@ -256,7 +276,7 @@ type Fs struct {
bucket string // the bucket we are working on bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket bucketOK bool // true if we have created the bucket
pacer *pacer.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
} }
// Object describes a storage object // Object describes a storage object
@@ -381,7 +401,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else { } else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig) oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage") ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
} }
} }
@@ -395,7 +419,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket, bucket: bucket,
root: directory, root: directory,
opt: *opt, opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer), pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
@@ -709,8 +733,19 @@ func (f *Fs) Mkdir(dir string) (err error) {
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
@@ -976,7 +1011,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {
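For reference, a config stanza exercising the new option might look like this (the remote name and project number are placeholders, not taken from the change above):

[gcs]
type = google cloud storage
project_number = 12345678
bucket_policy_only = true

With bucket_policy_only set, the Mkdir and Update paths above skip the PredefinedAcl calls, since ACL-based requests are rejected on Bucket Policy Only buckets.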


@@ -6,6 +6,7 @@ package http
import (
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -44,6 +45,22 @@ func init() {
Value: "https://user:pass@example.com", Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password", Help: "Connect to example.com using a username and password",
}}, }},
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /

Use this if your target website does not use / on the end of
directories.

A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.

Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -52,6 +69,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
@@ -270,14 +288,20 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
}
break
}
@@ -302,14 +326,16 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -353,11 +379,16 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}
return entries, nil
@@ -433,6 +464,16 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}
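A plausible invocation of the new option against a listing-style site (the flag name assumes rclone's usual auto-generated backend flags; the URL is a placeholder):

rclone lsd --http-url https://example.com --http-no-slash :http:

Use with care: as the help text warns, any file served as text/html will be treated as a directory in this mode.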


@@ -65,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
entries, err := f.List("")
require.NoError(t, err)
@@ -93,15 +93,29 @@ func testListRoot(t *testing.T, f fs.Fs) {
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
@@ -194,7 +208,7 @@ func TestIsAFileRoot(t *testing.T) {
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
}
func TestIsAFileSubDir(t *testing.T) {


@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>


@@ -190,7 +190,7 @@ type Fs struct {
endpointURL string
srv *rest.Client
apiSrv *rest.Client
pacer *fs.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -381,6 +381,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
// add jottacloud to the long list of sites that don't follow the oauth spec correctly
oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
@@ -403,7 +406,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,

backend/koofr/koofr.go Normal file

@@ -0,0 +1,589 @@
package koofr
import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represents the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}
// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}
func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}
// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}
// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}
// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}
// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}
// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}
// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}
// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}
// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the set of hash types provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from a path relative to the Fs root
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}
// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}
// Put creates or updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}
// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}
// About reports space usage (with MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}
// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}
// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}
_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}
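A minimal config stanza for the new backend might look like the following (values are placeholders; the password is the app-specific one generated on the Koofr preferences page and is stored obscured by rclone config):

[koofr]
type = koofr
user = you@example.com
password = <obscured app password>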


@@ -0,0 +1,14 @@
package koofr_test
import (
"testing"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}


@@ -98,7 +98,7 @@ type Fs struct {
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *fs.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
DuplicateFiles: true,


@@ -261,7 +261,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
@@ -335,8 +335,13 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
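To make the addressing pattern concrete (the IDs are hypothetical): looking up relPath docs/report.txt under itemID 01XYZ issues

GET drives/<driveID>/items/01XYZ:/docs/report.txt:

rather than the root-relative form

GET drives/<driveID>/root:/docs/report.txt

which does not work for items inside "shared with me" folders on OneDrive Personal. The trailing colon is added by withTrailingColon, defined further down.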
@@ -475,7 +480,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -703,9 +708,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(remote, info)
@@ -819,9 +822,6 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
@@ -1340,12 +1340,12 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
}
} else {
opts = rest.Opts{
Method: "PATCH",
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
}
}
update := api.SetFileSystemInfo{
@@ -1488,7 +1488,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
// Create upload session
@@ -1535,7 +1535,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
fs.Debugf(o, "Starting singlepart upload")
@@ -1668,6 +1668,21 @@ func getRelativePathInsideBase(base, target string) (string, bool) {
return "", false return "", false
} }
// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
if remotePath == "" {
return ""
}
if strings.HasSuffix(remotePath, "/") {
return remotePath[:len(remotePath)-1] + ":/"
}
return remotePath + ":"
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
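A standalone sketch of the input/output behaviour of withTrailingColon (the helper body is reproduced here only so the example runs on its own):

package main

import (
	"fmt"
	"strings"
)

// withTrailingColon mirrors the helper added above, for illustration only.
func withTrailingColon(remotePath string) string {
	if remotePath == "" {
		return ""
	}
	if strings.HasSuffix(remotePath, "/") {
		return remotePath[:len(remotePath)-1] + ":/"
	}
	return remotePath + ":"
}

func main() {
	for _, s := range []string{"", "docs", "docs/"} {
		fmt.Printf("%q -> %q\n", s, withTrailingColon(s))
	}
	// Output:
	// "" -> ""
	// "docs" -> "docs:"
	// "docs/" -> "docs:/"
}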


@@ -65,7 +65,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
dirCache *dircache.DirCache // Map of directory path to directory id
}
@@ -144,7 +144,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.dirCache = dircache.New(root, "0", f)
@@ -287,9 +287,6 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
return nil
}


@@ -161,7 +161,6 @@ type UserInfo struct {
PublicLinkQuota int64 `json:"publiclinkquota"`
Email string `json:"email"`
UserID int `json:"userid"`
Quota int64 `json:"quota"`
TrashRevretentionDays int `json:"trashrevretentiondays"`
Premium bool `json:"premium"`


@@ -95,7 +95,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -254,7 +254,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: false,


@@ -346,7 +346,7 @@ func init() {
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.", Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba", Provider: "!AWS,IBMCOS,Alibaba",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "objects-us-west-1.dream.io", Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint", Help: "Dream Objects endpoint",
Provider: "Dreamhost", Provider: "Dreamhost",
}, { }, {
@@ -782,7 +782,7 @@ type Fs struct {
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
@@ -1055,7 +1055,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
c: c,
bucket: bucket,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{


@@ -427,6 +427,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
return NewFsWithConnection(name, root, opt, sshConfig)
}
// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,


@@ -2,6 +2,7 @@ package swift
import (
"net/http"
"time"
"github.com/ncw/swift"
)
@@ -65,6 +66,14 @@ func (a *auth) Token() string {
return a.parentAuth.Token()
}
// Expires returns the time the token expires if known or Zero if not.
func (a *auth) Expires() (t time.Time) {
if do, ok := a.parentAuth.(swift.Expireser); ok {
t = do.Expires()
}
return t
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
if a.parentAuth == nil {
@@ -74,4 +83,7 @@ func (a *auth) CdnUrl() string { // nolint
}
// Check the interfaces are satisfied
var (
_ swift.Authenticator = (*auth)(nil)
_ swift.Expireser = (*auth)(nil)
)


@@ -216,7 +216,7 @@ type Fs struct {
containerOK bool // true if we have created the container
segmentsContainer string // container to store the segments (if any) in
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
}
// Object describes a swift object
@@ -401,7 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
}
f.features = (&fs.Features{
ReadMimeType: true,


@@ -69,7 +69,7 @@ type Prop struct {
Status []string `xml:"DAV: status"`
Name string `xml:"DAV: prop>displayname,omitempty"`
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
Checksums []string `xml:"prop>checksums>checksum,omitempty"`


@@ -101,7 +101,7 @@ type Fs struct {
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
@@ -173,9 +173,16 @@ func itemIsDir(item *api.Response) bool {
fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name) fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
} }
// the iscollection prop is a Microsoft extension, but if present it is a reliable indicator // the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
// if the above check failed - see #2716 // if the above check failed - see #2716. This can be an integer or a boolean - see #2964
if t := item.Props.IsCollection; t != nil { if t := item.Props.IsCollection; t != nil {
return *t != 0 switch x := strings.ToLower(*t); x {
case "0", "false":
return false
case "1", "true":
return true
default:
fs.Debugf(nil, "Unknown value %q for IsCollection", x)
}
}
return false
}
@@ -311,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}
f.features = (&fs.Features{
@@ -637,10 +644,18 @@ func (f *Fs) _mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
}
return err
}
// mkdir makes the directory and parents using native paths
@@ -648,12 +663,7 @@ func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
@@ -906,11 +916,13 @@ func (f *Fs) About() (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed") return nil, errors.Wrap(err, "about call failed")
} }
usage := &fs.Usage{} usage := &fs.Usage{}
if q.Available >= 0 && q.Used >= 0 { if q.Available != 0 || q.Used != 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used) if q.Available >= 0 && q.Used >= 0 {
} usage.Total = fs.NewUsageValue(q.Available + q.Used)
if q.Used >= 0 { }
usage.Used = fs.NewUsageValue(q.Used) if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
} }
return usage, nil return usage, nil
} }
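A self-contained sketch of the reworked quota logic (types simplified; this is not the backend code itself): an all-zero quota reply is treated as "no quota information", and negative values are left unset.

package main

import "fmt"

// usageFromQuota mirrors the About logic above, for illustration.
func usageFromQuota(available, used int64) (total, usedOut *int64) {
	if available != 0 || used != 0 {
		if available >= 0 && used >= 0 {
			t := available + used
			total = &t
		}
		if used >= 0 {
			usedOut = &used
		}
	}
	return total, usedOut
}

func main() {
	show := func(p *int64) string {
		if p == nil {
			return "unset"
		}
		return fmt.Sprint(*p)
	}
	for _, q := range [][2]int64{{0, 0}, {100, 7}, {-3, 7}} {
		t, u := usageFromQuota(q[0], q[1])
		fmt.Printf("available=%d used=%d -> total=%s used=%s\n", q[0], q[1], show(t), show(u))
	}
}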


@@ -93,7 +93,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the yandex server
pacer *fs.Pacer // pacer for API calls
diskRoot string // root path with "disk:/" container name
}
@@ -269,7 +269,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.setRoot(root)
f.features = (&fs.Features{


@@ -17,14 +17,18 @@ import (
"io/ioutil" "io/ioutil"
"log" "log"
"net/http" "net/http"
"net/url"
"os" "os"
"os/exec" "os/exec"
"path"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/lib/rest"
"golang.org/x/net/html"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@@ -33,6 +37,7 @@ var (
install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
@@ -209,6 +214,57 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
return "", "" return "", ""
} }
// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so isn't rate limited when not using GITHUB login details
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
baseURL := "https://github.com/" + project + "/releases"
log.Printf("Fetching asset info for %q from %q", project, baseURL)
base, err := url.Parse(baseURL)
if err != nil {
log.Fatalf("URL Parse failed: %v", err)
}
resp, err := http.Get(baseURL)
if err != nil {
log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Error: %s", readBody(resp.Body))
log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
}
doc, err := html.Parse(resp.Body)
if err != nil {
log.Fatalf("Failed to parse web page: %v", err)
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
if assetName == "" {
assetName = name
assetURL = u.String()
}
}
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
if assetName == "" || assetURL == "" {
log.Fatalf("Didn't find URL in page")
}
return assetURL, assetName
}
// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
@@ -346,7 +402,12 @@ func main() {
log.Fatalf("Invalid regexp for name %q: %v", nameRe, err) log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
} }
assetURL, assetName := getAsset(project, matchName) var assetURL, assetName string
if *useAPI {
assetURL, assetName = getAsset(project, matchName)
} else {
assetURL, assetName = getAssetFromReleasesPage(project, matchName)
}
fileName := filepath.Join(os.TempDir(), assetName) fileName := filepath.Join(os.TempDir(), assetName)
getFile(assetURL, fileName) getFile(assetURL, fileName)


@@ -36,6 +36,7 @@ docs = [
"http.md", "http.md",
"hubic.md", "hubic.md",
"jottacloud.md", "jottacloud.md",
"koofr.md",
"mega.md", "mega.md",
"azureblob.md", "azureblob.md",
"onedrive.md", "onedrive.md",


@@ -29,7 +29,7 @@ github-release release \
--name "rclone" \ --name "rclone" \
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers." --description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
for build in `ls build | grep -v current`; do for build in `ls build | grep -v current | grep -v testbuilds`; do
echo "Uploading ${build}" echo "Uploading ${build}"
base="${build%.*}" base="${build%.*}"
parts=(${base//-/ }) parts=(${base//-/ })


@@ -341,8 +341,7 @@ func initConfig() {
configflags.SetFlags()
// Load filters
err := filterflags.Reload()
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}


@@ -7,8 +7,13 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
var (
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}
var commandDefintion = &cobra.Command{
@@ -69,7 +74,7 @@ changed recently very efficiently like this:
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
}
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
})
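Usage sketch for the new flag (paths are placeholders):

rclone copy --create-empty-src-dirs /path/to/src remote:dst

Without the flag, copy keeps its old behaviour of not replicating empty source directories.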


@@ -48,7 +48,7 @@ destination.
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, false)
}
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})


@@ -37,7 +37,7 @@ documentation, changelog and configuration walkthroughs.
const (
bashCompletionFunc = `
__rclone_custom_func() {
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
local cur cword prev words
if declare -F _init_completion > /dev/null; then
@@ -45,7 +45,7 @@ __custom_func() {
else
__rclone_init_completion -n : || return
fi
if [[ $cur != *:* ]]; then
local remote
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
@@ -54,10 +54,10 @@ __custom_func() {
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
fi
else
local path=${cur#*:}
if [[ $path == */* ]]; then
local prefix=$(eval printf '%s' "${path%/*}")
else
local prefix=
fi
@@ -66,6 +66,7 @@ __custom_func() {
local reply=${prefix:+$prefix/}$line
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
[[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
fi
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
fi
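One way to try the updated completion function (the output path is an example):

rclone genautocomplete bash ~/rclone.bash
source ~/rclone.bash

after which completing a remote path containing spaces, e.g. rclone ls remote:My<Tab>, should be quoted correctly thanks to compopt -o filenames.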


@@ -21,11 +21,22 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
type position int
const (
positionMiddle position = 1 << iota
positionLeft
positionRight
positionNone position = 0
positionAll position = positionRight<<1 - 1
)
var (
checkNormalization bool
checkControl bool
checkLength bool
checkStreaming bool
positionList = []position{positionMiddle, positionLeft, positionRight}
)
func init() {
@@ -59,7 +70,7 @@ a bit of go code for each one.
type results struct {
f fs.Fs
mu sync.Mutex
stringNeedsEscaping map[string]position
maxFileLength int
canWriteUnnormalized bool
canReadUnnormalized bool
@@ -69,8 +80,8 @@ type results struct {
func newResults(f fs.Fs) *results {
return &results{
f: f,
stringNeedsEscaping: make(map[string]position),
}
}
@@ -79,13 +90,13 @@ func (r *results) Print() {
fmt.Printf("// %s\n", r.f.Name()) fmt.Printf("// %s\n", r.f.Name())
if checkControl { if checkControl {
escape := []string{} escape := []string{}
for c, needsEscape := range r.charNeedsEscaping { for c, needsEscape := range r.stringNeedsEscaping {
if needsEscape { if needsEscape != positionNone {
escape = append(escape, fmt.Sprintf("0x%02X", c)) escape = append(escape, fmt.Sprintf("0x%02X", c))
} }
} }
sort.Strings(escape) sort.Strings(escape)
fmt.Printf("charNeedsEscaping = []byte{\n") fmt.Printf("stringNeedsEscaping = []byte{\n")
fmt.Printf("\t%s\n", strings.Join(escape, ", ")) fmt.Printf("\t%s\n", strings.Join(escape, ", "))
fmt.Printf("}\n") fmt.Printf("}\n")
} }
@@ -130,20 +141,45 @@ func (r *results) checkUTF8Normalization() {
}
}
func (r *results) checkStringPositions(s string) {
fs.Infof(r.f, "Writing position file 0x%0X", s)
positionError := positionNone
for _, pos := range positionList {
path := ""
switch pos {
case positionMiddle:
path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
case positionLeft:
path = fmt.Sprintf("%s-position-left-%0X", s, s)
case positionRight:
path = fmt.Sprintf("position-right-%0X-%s", s, s)
default:
panic("invalid position: " + pos.String())
}
_, writeErr := r.writeFile(path)
if writeErr != nil {
fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
} else {
fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
}
obj, getErr := r.f.NewObject(path)
if getErr != nil {
fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
} else {
if obj.Size() != 50 {
fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
} else {
fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
}
}
if writeErr != nil || getErr != nil {
positionError += pos
}
}
r.mu.Lock()
r.stringNeedsEscaping[s] = positionError
r.mu.Unlock()
}
@@ -157,19 +193,28 @@ func (r *results) checkControls() {
}
var wg sync.WaitGroup
for i := rune(0); i < 128; i++ {
s := string(i)
if i == 0 || i == '/' {
// We're not even going to check NULL or /
r.stringNeedsEscaping[s] = positionAll
continue
}
wg.Add(1)
go func(s string) {
defer wg.Done()
token := <-tokens
r.checkStringPositions(s)
tokens <- token
}(s)
}
for _, s := range []string{"", "\xBF", "\xFE"} {
wg.Add(1)
go func(s string) {
defer wg.Done()
token := <-tokens
r.checkStringPositions(s)
tokens <- token
}(s)
}
wg.Wait()
fs.Infof(r.f, "Done trying to create control character file names")
@@ -268,3 +313,35 @@ func readInfo(f fs.Fs) error {
r.Print()
return nil
}
func (e position) String() string {
switch e {
case positionNone:
return "none"
case positionAll:
return "all"
}
var buf bytes.Buffer
if e&positionMiddle != 0 {
buf.WriteString("middle")
e &= ^positionMiddle
}
if e&positionLeft != 0 {
if buf.Len() != 0 {
buf.WriteRune(',')
}
buf.WriteString("left")
e &= ^positionLeft
}
if e&positionRight != 0 {
if buf.Len() != 0 {
buf.WriteRune(',')
}
buf.WriteString("right")
e &= ^positionRight
}
if e != positionNone {
panic("invalid position")
}
return buf.String()
}
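A standalone sketch of how the position bit-flags compose (it mirrors the definitions above; illustration only):

package main

import "fmt"

type position int

const (
	positionMiddle position = 1 << iota // 1
	positionLeft                        // 2
	positionRight                       // 4
	positionNone position = 0
	positionAll  position = positionRight<<1 - 1 // 7, i.e. middle|left|right
)

func main() {
	combined := positionLeft | positionRight
	fmt.Println(combined&positionMiddle != 0) // false
	fmt.Println(combined&positionRight != 0)  // true
	fmt.Println(positionAll == (positionMiddle | positionLeft | positionRight)) // true
}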

cmd/info/process.sh Normal file

@@ -0,0 +1,40 @@
set -euo pipefail
for f in info-*.log; do
for pos in middle left right; do
egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
done
{
echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
echo "Write\tWrite\tWrite\tGet\tGet\tGet"
echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
paste $f.write_{middle,left,right} $f.get_{middle,left,right}
} > $f.csv
done
for f in info-*.list; do
for pos in middle left right; do
cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
done
{
echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
echo "List\tList\tList"
echo "Mid\tLeft\tRight"
for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
echo -n "\t"
echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
echo -n "\t"
echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
# echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
# echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
# echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
done
} > $f.csv
done
for f in info-*.list; do
paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
done
paste *.full.csv > info-complete.csv

cmd/info/test.cmd (new file, 3 lines)

@@ -0,0 +1,3 @@
rclone.exe purge info
rclone.exe info -vv info > info-LocalWindows.log 2>&1
rclone.exe ls -vv info > info-LocalWindows.list 2>&1

cmd/info/test.sh (new executable file, 43 lines)

@@ -0,0 +1,43 @@
#!/usr/bin/env zsh
#
# example usage:
# $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh --list | \
# parallel -P20 $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh
export PATH=$GOPATH/src/github.com/ncw/rclone:$PATH
typeset -A allRemotes
allRemotes=(
TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
TestB2 ''
TestBox ''
TestDrive '--tpslimit=5'
TestCrypt ''
TestDropbox '--checkers=1'
TestJottacloud ''
TestMega ''
TestOneDrive ''
TestOpenDrive '--low-level-retries=2 --checkers=5'
TestPcloud '--low-level-retries=2 --timeout=15s'
TestS3 ''
Local ''
)
set -euo pipefail
if [[ $# -eq 0 ]]; then
set -- ${(k)allRemotes[@]}
elif [[ $1 = --list ]]; then
printf '%s\n' ${(k)allRemotes[@]}
exit 0
fi
for remote; do
dir=$remote:infotest
if [[ $remote = Local ]]; then
dir=infotest
fi
rclone purge $dir || :
rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
rclone ls -vv $dir &> info-$remote.list
done


@@ -10,7 +10,6 @@ import (
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -67,8 +66,10 @@ output:
s - size
t - modification time
h - hash
-i - ID of object if known
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name

So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
@@ -161,6 +162,10 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.SetCSV(csv)
list.SetDirSlash(dirSlash)
list.SetAbsolute(absolute)
var opt = operations.ListJSONOpt{
NoModTime: true,
Recurse: recurse,
}
for _, char := range format {
switch char {
@@ -168,38 +173,38 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.AddPath()
case 't':
list.AddModTime()
opt.NoModTime = false
case 's':
list.AddSize()
case 'h':
list.AddHash(hashType)
opt.ShowHash = true
case 'i':
list.AddID()
case 'm':
list.AddMimeType()
case 'e':
list.AddEncrypted()
opt.ShowEncrypted = true
case 'o':
list.AddOrigID()
opt.ShowOrigIDs = true
default:
return errors.Errorf("Unknown format character %q", char)
}
}
-return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
-if err != nil {
-fs.CountError(err)
-fs.Errorf(path, "error listing: %v", err)
-return nil
-}
-for _, entry := range entries {
-_, isDir := entry.(fs.Directory)
-if isDir {
-if filesOnly {
-continue
-}
-} else {
-if dirsOnly {
-continue
-}
-}
-_, _ = fmt.Fprintln(out, list.Format(entry))
-}
return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
if item.IsDir {
if filesOnly {
return nil
}
} else {
if dirsOnly {
return nil
}
}
_, _ = fmt.Fprintln(out, list.Format(item))
return nil
})
}


@@ -37,6 +37,8 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Crtime = modTime
// FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
a.Size = 512
a.Blocks = 1
return nil
}


@@ -10,11 +10,13 @@ import (
// Globals
var (
deleteEmptySrcDirs = false
createEmptySrcDirs = false
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}

var commandDefintion = &cobra.Command{
@@ -52,7 +54,7 @@ can speed transfers up greatly.
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
-return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
}
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
})


@@ -52,7 +52,7 @@ transfer.
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
-return sync.MoveDir(fdst, fsrc, false)
return sync.MoveDir(fdst, fsrc, false, false)
}
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
})


@@ -10,6 +10,7 @@ import (
"sort" "sort"
"strings" "strings"
runewidth "github.com/mattn/go-runewidth"
"github.com/ncw/rclone/cmd" "github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ncdu/scan" "github.com/ncw/rclone/cmd/ncdu/scan"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
@@ -122,7 +123,7 @@ func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
for _, c := range msg {
termbox.SetCell(x, y, c, fg, bg)
-x++
x += runewidth.RuneWidth(c)
if x >= xmax {
return
}
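The `x++` to `x += runewidth.RuneWidth(c)` change matters for double-width characters: advancing by one cell per rune misaligns everything after a CJK character. A small illustration using the `mattn/go-runewidth` package imported above:

```
package main

import (
	"fmt"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	// ASCII runes occupy one terminal cell, most CJK runes occupy two.
	fmt.Println(runewidth.RuneWidth('a')) // 1
	fmt.Println(runewidth.RuneWidth('世')) // 2
}
```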


@@ -0,0 +1,184 @@
package dlna
const connectionManagerServiceDescription = `<?xml version="1.0" encoding="UTF-8"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<actionList>
<action>
<name>GetProtocolInfo</name>
<argumentList>
<argument>
<name>Source</name>
<direction>out</direction>
<relatedStateVariable>SourceProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>Sink</name>
<direction>out</direction>
<relatedStateVariable>SinkProtocolInfo</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>PrepareForConnection</name>
<argumentList>
<argument>
<name>RemoteProtocolInfo</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>ConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>ConnectionComplete</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionIDs</name>
<argumentList>
<argument>
<name>ConnectionIDs</name>
<direction>out</direction>
<relatedStateVariable>CurrentConnectionIDs</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionInfo</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>ProtocolInfo</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>Status</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionStatus</relatedStateVariable>
</argument>
</argumentList>
</action>
</actionList>
<serviceStateTable>
<stateVariable sendEvents="yes">
<name>SourceProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>SinkProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>CurrentConnectionIDs</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionStatus</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>OK</allowedValue>
<allowedValue>ContentFormatMismatch</allowedValue>
<allowedValue>InsufficientBandwidth</allowedValue>
<allowedValue>UnreliableChannel</allowedValue>
<allowedValue>Unknown</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionManager</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Direction</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>Input</allowedValue>
<allowedValue>Output</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_AVTransportID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_RcsID</name>
<dataType>i4</dataType>
</stateVariable>
</serviceStateTable>
</scpd>`


@@ -84,6 +84,21 @@ var services = []*service{
},
SCPD: contentDirectoryServiceDescription,
},
{
Service: upnp.Service{
ServiceType: "urn:schemas-upnp-org:service:ConnectionManager:1",
ServiceId: "urn:upnp-org:serviceId:ConnectionManager",
ControlURL: serviceControlURL,
},
SCPD: connectionManagerServiceDescription,
},
}

func init() {
for _, s := range services {
p := path.Join("/scpd", s.ServiceId)
s.SCPDURL = p
}
}
func devices() []string {
@@ -250,9 +265,6 @@ func (s *server) initMux(mux *http.ServeMux) {
// Install handlers to serve SCPD for each UPnP service.
for _, s := range services {
-p := path.Join("/scpd", s.ServiceId)
-s.SCPDURL = p
mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", `text/xml; charset="utf-8"`)


@@ -59,6 +59,11 @@ func TestRootSCPD(t *testing.T) {
// Make sure that the SCPD contains a CDS service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
// Make sure that the SCPD contains a CM service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>")
// Ensure that the SCPD url is configured.
require.Regexp(t, "<SCPDURL>/.*</SCPDURL>", string(body))
}
// Make sure that it serves content from the remote.


@@ -330,25 +330,12 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
ls := listItems{}
// if remote supports ListR use that directly, otherwise use recursive Walk
-var err error
-if ListR := s.f.Features().ListR; ListR != nil {
-err = ListR(remote, func(entries fs.DirEntries) error {
-for _, entry := range entries {
-ls.add(entry)
-}
-return nil
-})
-} else {
-err = walk.Walk(s.f, remote, true, -1, func(path string, entries fs.DirEntries, err error) error {
-if err == nil {
-for _, entry := range entries {
-ls.add(entry)
-}
-}
-return err
-})
-}
err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, entry := range entries {
ls.add(entry)
}
return nil
})
if err != nil {
_, err = fserrors.Cause(err)
if err != fs.ErrorDirNotFound {


@@ -6,8 +6,13 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
var (
createEmptySrcDirs = false
)
func init() { func init() {
cmd.Root.AddCommand(commandDefintion) cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
} }
var commandDefintion = &cobra.Command{ var commandDefintion = &cobra.Command{
@@ -39,7 +44,7 @@ go there.
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(true, true, command, func() error {
-return sync.Sync(fdst, fsrc)
return sync.Sync(fdst, fsrc, createEmptySrcDirs)
})
},
}


@@ -29,6 +29,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}} * {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} * {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}} * {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}} * {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}} * {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}} * {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}


@@ -236,3 +236,15 @@ Contributors
* weetmuts <oehrstroem@gmail.com>
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
* Nestar47 <47841759+Nestar47@users.noreply.github.com>
* Six <brbsix@gmail.com>
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>
* Dan Walters <dan@walters.io>
* Danil Semelenov <sgtpep@users.noreply.github.com>
* xopez <28950736+xopez@users.noreply.github.com>


@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Usage"
-date: "2015-06-06"
date: "2019-02-25"
---

Configure
@@ -34,6 +34,7 @@ See the following for detailed instructions for
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mega](/mega/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
@@ -98,7 +99,7 @@ The main rclone commands with most used first
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output shell completion scripts for rclone.
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
-* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint. **EXPERIMENTAL**
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of a crypted remote.
@@ -170,11 +171,24 @@ should be the name or prefix of a backend (the `type` in the config
file) and all the configuration for the backend should be provided on
the command line (or in environment variables).

-Eg
Here are some examples:

rclone lsd --http-url https://pub.rclone.org :http:

-Which lists all the directories in `pub.rclone.org`.
To list all the directories in the root of `https://pub.rclone.org/`.

rclone lsf --http-url https://example.com :http:path/to/dir

To list files and directories in `https://example.com/path/to/dir/`

rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir

To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`.

rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir

To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.
Quoting and the shell
---------------------
@@ -670,6 +684,24 @@ uploaded compressed files.
There is no need to set this in normal operation, and doing so will
decrease the network transfer efficiency of rclone.
### --no-traverse ###
The `--no-traverse` flag controls whether the destination file system
is traversed when using the `copy` or `move` commands.
`--no-traverse` is not compatible with `sync` and will be ignored if
you supply it with `sync`.
If you are only copying a small number of files (or are filtering most
of the files) and/or have a large number of files on the destination
then `--no-traverse` will stop rclone listing the destination and save
time.
However, if you are copying a large number of files, especially if you
are doing a copy where lots of the files under consideration haven't
changed and won't need copying then you shouldn't use `--no-traverse`.
See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
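As a further illustration (paths and remote name hypothetical), copying only recently changed files into a large destination without listing it first:

rclone copy --no-traverse --max-age 24h /path/to/src remote:dest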
### --no-update-modtime ###

When using this flag, rclone won't update modification times of remote
@@ -789,6 +821,16 @@ then the files will have SUFFIX added on to them.
See `--backup-dir` for more info.
### --suffix-keep-extension ###
When using `--suffix`, setting this causes rclone to put the SUFFIX
before the extension of the files that it backs up rather than after.

So let's say we had `--suffix -2019-01-01`; without the flag `file.txt`
would be backed up to `file.txt-2019-01-01` and with the flag it would
be backed up to `file-2019-01-01.txt`. This can be helpful to make
sure the suffixed files can still be opened.
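An illustrative invocation (remote names hypothetical) combining the two flags:

rclone sync /path/to/src remote:dest --backup-dir remote:backup --suffix -2019-01-01 --suffix-keep-extension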
### --syslog ###

On capable OSes (not Windows or Plan9) send all log output to syslog.
@@ -991,6 +1033,47 @@ with this setting.
Prints the version number
SSL/TLS options
---------------
The outgoing SSL/TLS connections rclone makes can be controlled with
these options. For example this can be very useful with the HTTP or
WebDAV backends. Rclone HTTP servers have their own set of
configuration for SSL/TLS which you can find in their documentation.
### --ca-cert string
This loads the PEM encoded certificate authority certificate and uses
it to verify the certificates of the servers rclone connects to.
If you have generated certificates signed with a local CA then you
will need this flag to connect to servers using those certificates.
### --client-cert string
This loads the PEM encoded client side certificate.
This is used for [mutual TLS authentication](https://en.wikipedia.org/wiki/Mutual_authentication).
The `--client-key` flag is required too when using this.
### --client-key string
This loads the PEM encoded client side private key used for mutual TLS
authentication. Used in conjunction with `--client-cert`.
### --no-check-certificate=true/false ###
`--no-check-certificate` controls whether a client verifies the
server's certificate chain and host name.
If `--no-check-certificate` is true, TLS accepts any certificate
presented by the server and any host name in that certificate.
In this mode, TLS is susceptible to man-in-the-middle attacks.
This option defaults to `false`.
**This should be used only for testing.**
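A hypothetical example putting the three certificate flags together against a WebDAV server that uses a private CA and requires client certificates:

rclone ls webdav-remote: --ca-cert ca.pem --client-cert client.pem --client-key client.key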
Configuration Encryption
------------------------

Your configuration file contains information for logging in to
@@ -1147,36 +1230,6 @@ use it.
Write memory profile to file. This can be analysed with `go tool pprof`.
-### --no-check-certificate=true/false ###
-
-`--no-check-certificate` controls whether a client verifies the
-server's certificate chain and host name.
-If `--no-check-certificate` is true, TLS accepts any certificate
-presented by the server and any host name in that certificate.
-In this mode, TLS is susceptible to man-in-the-middle attacks.
-
-This option defaults to `false`.
-
-**This should be used only for testing.**
-
-### --no-traverse ###
-
-The `--no-traverse` flag controls whether the destination file system
-is traversed when using the `copy` or `move` commands.
-`--no-traverse` is not compatible with `sync` and will be ignored if
-you supply it with `sync`.
-
-If you are only copying a small number of files (or are filtering most
-of the files) and/or have a large number of files on the destination
-then `--no-traverse` will stop rclone listing the destination and save
-time.
-
-However, if you are copying a large number of files, especially if you
-are doing a copy where lots of the files under consideration haven't
-changed and won't need copying then you shouldn't use `--no-traverse`.
-
-See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
Filtering
---------


@@ -854,6 +854,15 @@ The most likely cause of this is the duplicated file issue above - run
`rclone dedupe` and check your logs for duplicate object or directory
messages.
This can also be caused by a delay/caching on google drive's end when
comparing directory listings, specifically with team drives used in
combination with --fast-list. Files that were uploaded recently may
not appear on the directory list sent to rclone when using --fast-list.
Waiting a moderate period of time between attempts (estimated to be
approximately 1 hour) and/or not using --fast-list both seem to be
effective in preventing the problem.
### Making your own client_id ###

When you use rclone with Google drive in its default configuration you


@@ -188,3 +188,10 @@ causes not all domains to be resolved properly.
Additionally with the `GODEBUG=netdns=` environment variable the Go
resolver decision can be influenced. This also allows resolving certain
issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
### The total size reported in the stats for a sync is wrong and keeps changing
It is likely you have more than 10,000 files that need to be
synced. By default rclone only gets 10,000 files ahead in a sync so as
not to use up too much memory. You can change this default with the
[--max-backlog](/docs/#max-backlog-n) flag.
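For example, to let rclone queue up to 200,000 files ahead (and so report a stable total sooner) at the cost of more memory:

rclone sync source:path dest:path --max-backlog 200000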


@@ -217,6 +217,20 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.
### Application Default Credentials ###
If no other source of credentials is provided, rclone will fall back to
[Application Default Credentials](https://cloud.google.com/video-intelligence/docs/common/auth#authenticating_with_application_default_credentials).
This is useful both when you have already configured authentication
for your developer account, and in production when running on a google
compute host. Note that if running in docker, you may need to run
additional commands on your google compute machine -
[see this page](https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud_as_a_docker_credential_helper).

Note that when application default credentials are used, there
is no need to explicitly configure a project number.
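For instance, on a developer machine with the Cloud SDK installed, Application Default Credentials are typically created with:

gcloud auth application-default login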
### --fast-list ###

This remote supports `--fast-list` which allows you to use fewer
@@ -328,6 +342,27 @@ Access Control List for new buckets.
- "publicReadWrite" - "publicReadWrite"
- Project team owners get OWNER access, and all Users get WRITER access. - Project team owners get OWNER access, and all Users get WRITER access.
#### --gcs-bucket-policy-only
Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
- Config: bucket_policy_only
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
- Type: bool
- Default: false
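An illustrative copy into such a bucket (remote and bucket names hypothetical):

rclone copy /path/to/files gcs-remote:my-bucket --gcs-bucket-policy-only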
#### --gcs-location

Location for the newly created buckets.

docs/content/koofr.md (new file, 189 lines)

@@ -0,0 +1,189 @@
---
title: "Koofr"
description: "Rclone docs for Koofr"
date: "2019-02-25"
---
<i class="fa fa-suitcase"></i> Koofr
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for Koofr involves creating an application password for
rclone. You can do that by opening the Koofr
[web application](https://app.koofr.net/app/admin/preferences/password),
giving the password a nice name like `rclone` and clicking on generate.
Here is an example of how to make a remote called `koofr`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> koofr
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / A stackable unification remote, which can appear to merge the contents of several remotes
\ "union"
2 / Alias for a existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
\ "s3"
5 / Backblaze B2
\ "b2"
6 / Box
\ "box"
7 / Cache a remote
\ "cache"
8 / Dropbox
\ "dropbox"
9 / Encrypt/Decrypt a remote
\ "crypt"
10 / FTP Connection
\ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
12 / Google Drive
\ "drive"
13 / Hubic
\ "hubic"
14 / JottaCloud
\ "jottacloud"
15 / Koofr
\ "koofr"
16 / Local Disk
\ "local"
17 / Mega
\ "mega"
18 / Microsoft Azure Blob Storage
\ "azureblob"
19 / Microsoft OneDrive
\ "onedrive"
20 / OpenDrive
\ "opendrive"
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
22 / Pcloud
\ "pcloud"
23 / QingCloud Object Storage
\ "qingstor"
24 / SSH/SFTP Connection
\ "sftp"
25 / Webdav
\ "webdav"
26 / Yandex Disk
\ "yandex"
27 / http Connection
\ "http"
Storage> koofr
** See help for koofr backend at: https://rclone.org/koofr/ **
Your Koofr user name
Enter a string value. Press Enter for the default ("").
user> USER@NAME
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
--------------------
[koofr]
type = koofr
baseurl = https://app.koofr.net
user = USER@NAME
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can choose to edit advanced config in order to enter your own service URL
if you use an on-premise or white label Koofr instance, or choose an alternative
mount instead of your primary storage.
Once configured you can then use `rclone` like this,
List directories in top level of your Koofr
rclone lsd koofr:
List all the files in your Koofr
rclone ls koofr:
To copy a local directory to a Koofr directory called backup
rclone copy /home/source remote:backup
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
### Standard Options
Here are the standard options specific to koofr (Koofr).
#### --koofr-user
Your Koofr user name
- Config: user
- Env Var: RCLONE_KOOFR_USER
- Type: string
- Default: ""
#### --koofr-password
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string
- Default: ""
### Advanced Options
Here are the advanced options specific to koofr (Koofr).
#### --koofr-baseurl
Base URL of the Koofr API to connect to
- Config: baseurl
- Env Var: RCLONE_KOOFR_BASEURL
- Type: string
- Default: "https://app.koofr.net"
#### --koofr-mountid
Mount ID of the mount to use. If omitted, the primary mount is used.
- Config: mountid
- Env Var: RCLONE_KOOFR_MOUNTID
- Type: string
- Default: ""
<!--- autogenerated options stop -->
### Limitations ###
Note that Koofr is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".


@@ -298,4 +298,13 @@ Description: Using application 'rclone' is currently not supported for your orga
This means that rclone can't use the OneDrive for Business API with your account. You can't do much about it, maybe write an email to your admins.

However, there are other ways to interact with your OneDrive account. Have a look at the webdav backend: https://rclone.org/webdav/#sharepoint
```
Error: invalid_grant
Code: AADSTS50076
Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
```
If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.


@@ -2,7 +2,7 @@
title: "Overview of cloud storage systems" title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems" description: "Overview of cloud storage systems"
type: page type: page
date: "2015-09-06" date: "2019-02-25"
--- ---
# Overview of cloud storage systems # # Overview of cloud storage systems #
@@ -28,6 +28,7 @@ Here is an overview of the major features of each cloud storage system.
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Jottacloud | MD5 | Yes | Yes | No | R/W |
| Koofr | MD5 | No | Yes | No | - |
| Mega | - | No | No | Yes | - |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |


@@ -556,6 +556,21 @@ This takes the following parameters
Authentication is required for this call.
### operations/publiclink: Create or retrieve a public link to the given file or folder.
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
Returns
- url - URL of the resource
See the [link command](/commands/rclone_link/) for more information on the above.
Authentication is required for this call.
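For example, invoked via the rc command against a running rclone remote control server (remote and path hypothetical):

rclone rc operations/publiclink fs=drive: remote=path/to/file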
### operations/purge: Remove a directory or container and all of its contents

This takes the following parameters


@@ -1112,6 +1112,11 @@ server_side_encryption =
storage_class =
```
If you are using an older version of CEPH, eg 10.2.x Jewel, then you
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
the config file as `upload_cutoff 0` to work around a bug which causes
uploading of small files to fail.
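An illustrative invocation (remote name hypothetical):

rclone copy /path/to/files ceph-remote:bucket --s3-upload-cutoff 0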
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you


@@ -2,7 +2,7 @@
<div class="row"> <div class="row">
<hr> <hr>
<div class="col-sm-12"> <div class="col-sm-12">
<p>&copy; <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2017<br> <p>&copy; <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2019<br>
Website hosted on a <a href="https://www.memset.com/dedicated-servers/vps/"><span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET CLOUD VPS</span></a>, Website hosted on a <a href="https://www.memset.com/dedicated-servers/vps/"><span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET CLOUD VPS</span></a>,
uploaded with <a href="https://rclone.org">rclone</a> uploaded with <a href="https://rclone.org">rclone</a>
and built with <a href="https://github.com/spf13/hugo">Hugo</a></p> and built with <a href="https://github.com/spf13/hugo">Hugo</a></p>


@@ -67,6 +67,7 @@
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li> <li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li> <li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li> <li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li> <li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li> <li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li> <li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>


@@ -201,8 +201,9 @@ func (s *StatsInfo) String() string {
}
dtRounded := dt - (dt % (time.Second / 10))
displaySpeed := speed
if fs.Config.DataRateUnit == "bits" {
-speed = speed * 8
displaySpeed *= 8
}

var (
@@ -235,7 +236,7 @@ func (s *StatsInfo) String() string {
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
percent(s.bytes, totalSize),
-fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
etaString(currentSize, totalSize, speed),
xfrchkString,
)
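(For example, with this change a transfer running at 1 MByte/s is displayed as 8 Mbit/s when `--stats-unit bits` is in effect, while the ETA is still computed from the unscaled byte rate.)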


@@ -67,6 +67,7 @@ type ConfigInfo struct {
DataRateUnit string
BackupDir string
Suffix string
SuffixKeepExtension bool
UseListR bool
BufferSize SizeSuffix
BwLimit BwTimetable
@@ -87,6 +88,9 @@ type ConfigInfo struct {
Progress bool
Cookie bool
UseMmap bool
CaCert string // Client Side CA
ClientCert string // Client Side Cert
ClientKey string // Client Side Key
}

// NewConfig creates a new config with everything set to the default


@@ -68,6 +68,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.") flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.") flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.") flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.") flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.") flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.") flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
@@ -89,6 +90,9 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.") flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.") flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).") flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
} }
// SetFlags converts any flags into config which weren't straight foward // SetFlags converts any flags into config which weren't straight foward


@@ -21,8 +21,9 @@ var Active = mustNewFilter(nil)
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
boundedRecursion bool
}

// Match returns true if rule matches path
@@ -46,13 +47,14 @@ type rules struct {
}

// add adds a rule if it doesn't exist already
-func (rs *rules) add(Include bool, re *regexp.Regexp) {
func (rs *rules) add(Include bool, re *regexp.Regexp, boundedRecursion bool) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
boundedRecursion: boundedRecursion,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
@@ -73,6 +75,23 @@ func (rs *rules) len() int {
return len(rs.rules)
}
// boundedRecursion returns true if the set of filters would only
// need bounded recursion to evaluate
func (rs *rules) boundedRecursion() bool {
var (
excludeAll = false
boundedRecursion = true
)
for _, rule := range rs.rules {
if rule.Include {
boundedRecursion = boundedRecursion && rule.boundedRecursion
} else if rule.Regexp.String() == `^.*$` {
excludeAll = true
}
}
return excludeAll && boundedRecursion
}
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
@@ -232,7 +251,8 @@ func (f *Filter) addDirGlobs(Include bool, glob string) error {
if err != nil {
return err
}
-f.dirRules.add(Include, dirRe)
boundedRecursion := globBoundedRecursion(dirGlob)
f.dirRules.add(Include, dirRe, boundedRecursion)
}
return nil
}
@@ -248,8 +268,9 @@ func (f *Filter) Add(Include bool, glob string) error {
if err != nil {
return err
}
boundedRecursion := globBoundedRecursion(glob)
if isFileRule {
-f.fileRules.add(Include, re)
f.fileRules.add(Include, re, boundedRecursion)
// If include rule work out what directories are needed to scan
// if exclude rule, we can't rule anything out
// Unless it is `*` which matches everything
@@ -262,7 +283,7 @@
}
}
if isDirRule {
-f.dirRules.add(Include, re)
f.dirRules.add(Include, re, boundedRecursion)
}
return nil
}
@@ -343,6 +364,12 @@ func (f *Filter) InActive() bool {
len(f.Opt.ExcludeFile) == 0)
}
// BoundedRecursion returns true if the filter can be evaluated with
// bounded recursion only.
func (f *Filter) BoundedRecursion() bool {
return f.fileRules.boundedRecursion()
}
// includeRemote returns whether this remote passes the filter rules.
func (f *Filter) includeRemote(remote string) bool {
for _, rule := range f.fileRules.rules {


@@ -25,6 +25,7 @@ func TestNewFilterDefault(t *testing.T) {
assert.Len(t, f.dirRules.rules, 0)
assert.Nil(t, f.files)
assert.True(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

// testFile creates a temp file with the contents
@@ -103,6 +104,38 @@ func TestNewFilterFull(t *testing.T) {
}
}
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}
func TestFilterBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{"", false},
{"- /**", true},
{"+ *.jpg", false},
{"+ *.jpg\n- /**", false},
{"+ /*.jpg\n- /**", true},
{"+ *.png\n+ /*.jpg\n- /**", false},
{"+ /*.png\n+ /*.jpg\n- /**", true},
{"- *.jpg\n- /**", true},
{"+ /*.jpg\n- /**", true},
{"+ /*dir/\n- /**", true},
{"+ /*dir/\n", false},
{"+ /*dir/**\n- /**", false},
{"+ **/pics*/*.jpg\n- /**", false},
} {
f, err := NewFilter(nil)
require.NoError(t, err)
for _, rule := range strings.Split(test.in, "\n") {
if rule != "" {
require.NoError(t, f.AddRule(rule))
}
}
got := f.BoundedRecursion()
assert.Equal(t, test.want, got, test.in)
}
}
type includeTest struct {
@@ -151,6 +184,7 @@ func TestNewFilterIncludeFiles(t *testing.T) {
{"file3.jpg", 3, 0, false}, {"file3.jpg", 3, 0, false},
}) })
assert.False(t, f.InActive()) assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

func TestNewFilterIncludeFilesDirs(t *testing.T) {
@@ -278,6 +312,7 @@ func TestNewFilterMinSize(t *testing.T) {
{"potato/file2.jpg", 99, 0, false}, {"potato/file2.jpg", 99, 0, false},
}) })
assert.False(t, f.InActive()) assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}

func TestNewFilterMaxSize(t *testing.T) {


@@ -13,9 +13,15 @@ var (
Opt = filter.DefaultOpt
)
// Reload the filters from the flags
func Reload() (err error) {
filter.Active, err = filter.NewFilter(&Opt)
return err
}
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
-rc.AddOption("filter", &Opt)
rc.AddOptionReload("filter", &Opt, Reload)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")


@@ -167,3 +167,15 @@ func globToDirGlobs(glob string) (out []string) {
return out
}
// globBoundedRecursion returns true if the glob only needs bounded
// recursion in the file tree to evaluate.
func globBoundedRecursion(glob string) bool {
if strings.Contains(glob, "**") {
return false
}
if strings.HasPrefix(glob, "/") {
return true
}
return false
}


@@ -108,3 +108,45 @@ func TestGlobToDirGlobs(t *testing.T) {
assert.Equal(t, test.want, got, test.in)
}
}
func TestGlobBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{`*`, false},
{`/*`, true},
{`/**`, false},
{`*.jpg`, false},
{`/*.jpg`, true},
{`/a/*.jpg`, true},
{`/a/b/*.jpg`, true},
{`*/*/*.jpg`, false},
{`a/b/`, false},
{`a/b`, false},
{`a/b/*.{png,gif}`, false},
{`/a/{jpg,png,gif}/*.{jpg,true,gif}`, true},
{`a/{a,a*b,a**c}/d/`, false},
{`/a/{a,a*b,a/c,d}/d/`, true},
{`**`, false},
{`a**`, false},
{`a**b`, false},
{`a**b**c**d`, false},
{`a**b/c**d`, false},
{`/A/a**b/B/c**d/C/`, false},
{`/var/spool/**/ncw`, false},
{`var/spool/**/ncw/`, false},
{"/file1.jpg", true},
{"/file2.png", true},
{"/*.jpg", true},
{"/*.png", true},
{"/potato", true},
{"/sausage1", true},
{"/sausage2*", true},
{"/sausage3**", false},
{"/a/*.jpg", true},
} {
got := globBoundedRecursion(test.in)
assert.Equal(t, test.want, got, test.in)
}
}


@@ -16,8 +16,10 @@ import (
"github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fspath" "github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -59,7 +61,7 @@ var (
ErrorNotAFile = errors.New("is a not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
-ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
@@ -407,6 +409,7 @@ type Features struct {
BucketBased bool // is bucket based (like s3, swift etc)
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type

// Purge all files in the root and the root directory
//
@@ -1112,3 +1115,81 @@ func GetModifyWindow(fss ...Info) time.Duration {
}
return window
}
// Pacer is a simple wrapper around a pacer.Pacer with logging.
type Pacer struct {
*pacer.Pacer
}
type logCalculator struct {
pacer.Calculator
}
// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
p := &Pacer{
Pacer: pacer.New(
pacer.InvokerOption(pacerInvoker),
pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
pacer.RetriesOption(Config.LowLevelRetries),
pacer.CalculatorOption(c),
),
}
p.SetCalculator(c)
return p
}
func (d *logCalculator) Calculate(state pacer.State) time.Duration {
oldSleepTime := state.SleepTime
newSleepTime := d.Calculator.Calculate(state)
if state.ConsecutiveRetries > 0 {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime)
}
} else {
if newSleepTime != oldSleepTime {
Debugf("pacer", "Reducing sleep to %v", newSleepTime)
}
}
return newSleepTime
}
// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c pacer.Calculator) {
switch c.(type) {
case *logCalculator:
Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator")
case nil:
c = &logCalculator{pacer.NewDefault()}
default:
c = &logCalculator{c}
}
p.Pacer.SetCalculator(c)
}
// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
switch _c := c.(type) {
case *logCalculator:
f(_c.Calculator)
default:
Logf("pacer", "Invalid Calculator in fs.Pacer: %t", c)
f(c)
}
})
}
func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
retry, err = f()
if retry {
Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
err = fserrors.RetryError(err)
}
return
}
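A usage sketch for the wrapper above; doRequest is a hypothetical stand-in for any operation that can fail transiently:
p := fs.NewPacer(pacer.NewDefault())
err := p.Call(func() (bool, error) {
	err := doRequest() // hypothetical network call
	if err != nil {
		return true, err // pacerInvoker logs the low level retry and backs off
	}
	return false, nil
})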

View File

@@ -2,8 +2,15 @@ package fs
import ( import (
"strings" "strings"
"sync"
"testing" "testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -70,3 +77,47 @@ func TestOption(t *testing.T) {
err = d.Set("sdfsdf") err = d.Set("sdfsdf")
assert.Error(t, err) assert.Error(t, err)
} }
var errFoo = errors.New("foo")
type dummyPaced struct {
retry bool
called int
wait *sync.Cond
}
func (dp *dummyPaced) fn() (bool, error) {
if dp.wait != nil {
dp.wait.L.Lock()
dp.wait.Wait()
dp.wait.L.Unlock()
}
dp.called++
return dp.retry, errFoo
}
func TestPacerCall(t *testing.T) {
expectedCalled := Config.LowLevelRetries
if expectedCalled == 0 {
expectedCalled = 20
Config.LowLevelRetries = expectedCalled
defer func() {
Config.LowLevelRetries = 0
}()
}
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true}
err := p.Call(dp.fn)
require.Equal(t, expectedCalled, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}
func TestPacerCallNoRetry(t *testing.T) {
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn)
require.Equal(t, 1, dp.called)
require.Implements(t, (*fserrors.Retrier)(nil), err)
}

View File

@@ -194,7 +194,7 @@ func Cause(cause error) (retriable bool, err error) {
// this case. // this case.
err = prev err = prev
} }
if err == prev { if reflect.DeepEqual(err, prev) {
// Unpack any struct or *struct with a field // Unpack any struct or *struct with a field
// of name Err which satisfies the error // of name Err which satisfies the error
// interface. This includes *url.Error, // interface. This includes *url.Error,
@@ -215,7 +215,7 @@ func Cause(cause error) (retriable bool, err error) {
} }
} }
} }
if err == prev { if reflect.DeepEqual(err, prev) {
break break
} }
} }
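Presumably the switch from == to reflect.DeepEqual is needed because comparing interface values with == panics at run time when the dynamic type is uncomparable. A minimal self-contained illustration:
package main

import "fmt"

type sliceError struct{ errs []error } // slice field makes the type uncomparable

func (e sliceError) Error() string { return "slice error" }

func main() {
	var a, b error = sliceError{}, sliceError{}
	fmt.Println(a == b) // run-time panic: comparing uncomparable type main.sliceError
}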

View File

@@ -5,6 +5,9 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/tls" "crypto/tls"
"crypto/x509"
"io/ioutil"
"log"
"net" "net"
"net/http" "net/http"
"net/http/cookiejar" "net/http/cookiejar"
@@ -130,7 +133,39 @@ func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
t.TLSHandshakeTimeout = ci.ConnectTimeout t.TLSHandshakeTimeout = ci.ConnectTimeout
t.ResponseHeaderTimeout = ci.Timeout t.ResponseHeaderTimeout = ci.Timeout
t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
// TLS Config
t.TLSClientConfig = &tls.Config{
InsecureSkipVerify: ci.InsecureSkipVerify,
}
// Load client certs
if ci.ClientCert != "" || ci.ClientKey != "" {
if ci.ClientCert == "" || ci.ClientKey == "" {
log.Fatalf("Both --client-cert and --client-key must be set")
}
cert, err := tls.LoadX509KeyPair(ci.ClientCert, ci.ClientKey)
if err != nil {
log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err)
}
t.TLSClientConfig.Certificates = []tls.Certificate{cert}
t.TLSClientConfig.BuildNameToCertificate()
}
// Load CA cert
if ci.CaCert != "" {
caCert, err := ioutil.ReadFile(ci.CaCert)
if err != nil {
log.Fatalf("Failed to read --ca-cert: %v", err)
}
caCertPool := x509.NewCertPool()
ok := caCertPool.AppendCertsFromPEM(caCert)
if !ok {
log.Fatalf("Failed to add certificates from --ca-cert")
}
t.TLSClientConfig.RootCAs = caCertPool
}
t.DisableCompression = ci.NoGzip t.DisableCompression = ci.NoGzip
t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialContextTimeout(ctx, network, addr, ci) return dialContextTimeout(ctx, network, addr, ci)
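Assembled, the fatal messages above imply this command line shape; a sketch with illustrative file names, where all three files are PEM encoded:
rclone --ca-cert ca.pem --client-cert client.pem --client-key client.key lsd remote: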

View File

@@ -191,25 +191,22 @@ var _ pflag.Value = (*DeduplicateMode)(nil)
// dedupeFindDuplicateDirs scans f for duplicate directories // dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) { func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) {
duplicateDirs := [][]fs.Directory{} dirs := map[string][]fs.Directory{}
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
if err != nil {
return err
}
dirs := map[string][]fs.Directory{}
entries.ForDir(func(d fs.Directory) { entries.ForDir(func(d fs.Directory) {
dirs[d.Remote()] = append(dirs[d.Remote()], d) dirs[d.Remote()] = append(dirs[d.Remote()], d)
}) })
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return nil return nil
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs") return nil, errors.Wrap(err, "find duplicate dirs")
} }
duplicateDirs := [][]fs.Directory{}
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return duplicateDirs, nil return duplicateDirs, nil
} }
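For contrast with the old callback-per-directory walk.Walk style, a minimal sketch of the walk.ListR shape now used here, with the signature exactly as it appears in this diff:
err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
	entries.ForDir(func(d fs.Directory) {
		// entries arrive in batches; accumulate across calls,
		// as the dirs map above does
	})
	return nil
})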
@@ -268,10 +265,7 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
// Now find duplicate files // Now find duplicate files
files := map[string][]fs.Object{} files := map[string][]fs.Object{}
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) { entries.ForObject(func(o fs.Object) {
remote := o.Remote() remote := o.Remote()
files[remote] = append(files[remote], o) files[remote] = append(files[remote], o)

View File

@@ -161,10 +161,7 @@ func TestDeduplicateRename(t *testing.T) {
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename) err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error { require.NoError(t, walk.ListR(r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) { entries.ForObject(func(o fs.Object) {
remote := o.Remote() remote := o.Remote()
if remote != "one-1.txt" && if remote != "one-1.txt" &&

View File

@@ -89,12 +89,7 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS
} }
} }
format := formatForPrecision(fsrc.Precision()) format := formatForPrecision(fsrc.Precision())
err := walk.Walk(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), func(dirPath string, entries fs.DirEntries, err error) error { err := walk.ListR(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries { for _, entry := range entries {
item := ListJSONItem{ item := ListJSONItem{
Path: entry.Remote(), Path: entry.Remote(),

View File

@@ -10,6 +10,7 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"path" "path"
"path/filepath"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@@ -272,7 +273,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
// Try server side copy first - if has optional interface and // Try server side copy first - if has optional interface and
// is same underlying remote // is same underlying remote
actionTaken = "Copied (server side copy)" actionTaken = "Copied (server side copy)"
if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) { if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
newDst, err = doCopy(src, remote) newDst, err = doCopy(src, remote)
if err == nil { if err == nil {
dst = newDst dst = newDst
@@ -283,7 +284,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
// If can't server side copy, do it manually // If can't server side copy, do it manually
if err == fs.ErrorCantCopy { if err == fs.ErrorCantCopy {
var in0 io.ReadCloser var in0 io.ReadCloser
in0, err = src.Open(hashOption) in0, err = newReOpen(src, hashOption, fs.Config.LowLevelRetries)
if err != nil { if err != nil {
err = errors.Wrap(err, "failed to open source object") err = errors.Wrap(err, "failed to open source object")
} else { } else {
@@ -391,7 +392,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
return newDst, nil return newDst, nil
} }
// See if we have Move available // See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) { if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
// Delete destination if it exists // Delete destination if it exists
if dst != nil { if dst != nil {
err = DeleteFile(dst) err = DeleteFile(dst)
@@ -434,6 +435,20 @@ func CanServerSideMove(fdst fs.Fs) bool {
return canMove || canCopy return canMove || canCopy
} }
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(remote string) string {
if fs.Config.Suffix == "" {
return remote
}
if fs.Config.SuffixKeepExtension {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext
}
return remote + fs.Config.Suffix
}
// DeleteFileWithBackupDir deletes a single file respecting --dry-run // DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors. // and accumulating stats and errors.
// //
@@ -455,7 +470,7 @@ func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) {
if !SameConfig(dst.Fs(), backupDir) { if !SameConfig(dst.Fs(), backupDir) {
err = errors.New("parameter to --backup-dir has to be on the same remote as destination") err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
} else { } else {
remoteWithSuffix := dst.Remote() + fs.Config.Suffix remoteWithSuffix := SuffixName(dst.Remote())
overwritten, _ := backupDir.NewObject(remoteWithSuffix) overwritten, _ := backupDir.NewObject(remoteWithSuffix)
_, err = Move(backupDir, overwritten, remoteWithSuffix, dst) _, err = Move(backupDir, overwritten, remoteWithSuffix, dst)
} }
@@ -524,6 +539,11 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(toBeDeleted, nil) return DeleteFilesWithBackupDir(toBeDeleted, nil)
} }
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}
// SameConfig returns true if fdst and fsrc are using the same config // SameConfig returns true if fdst and fsrc are using the same config
// file entry // file entry
func SameConfig(fdst, fsrc fs.Info) bool { func SameConfig(fdst, fsrc fs.Info) bool {
@@ -532,7 +552,7 @@ func SameConfig(fdst, fsrc fs.Info) bool {
// Same returns true if fdst and fsrc point to the same underlying Fs // Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool { func Same(fdst, fsrc fs.Info) bool {
return SameConfig(fdst, fsrc) && fdst.Root() == fsrc.Root() return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
} }
// Overlapping returns true if fdst and fsrc point to the same // Overlapping returns true if fdst and fsrc point to the same
@@ -543,7 +563,7 @@ func Overlapping(fdst, fsrc fs.Info) bool {
} }
// Return the Root with a trailing / if not empty // Return the Root with a trailing / if not empty
fixedRoot := func(f fs.Info) string { fixedRoot := func(f fs.Info) string {
s := strings.Trim(f.Root(), "/") s := strings.Trim(filepath.ToSlash(f.Root()), "/")
if s != "" { if s != "" {
s += "/" s += "/"
} }
@@ -810,11 +830,7 @@ func CheckDownload(fdst, fsrc fs.Fs, oneway bool) error {
// //
// Lists in parallel which may get them out of order // Lists in parallel which may get them out of order
func ListFn(f fs.Fs, fn func(fs.Object)) error { func ListFn(f fs.Fs, fn func(fs.Object)) error {
return walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { return walk.ListR(f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForObject(fn) entries.ForObject(fn)
return nil return nil
}) })
@@ -930,11 +946,7 @@ func ConfigMaxDepth(recursive bool) int {
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer // ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f fs.Fs, w io.Writer) error { func ListDir(f fs.Fs, w io.Writer) error {
return walk.Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries fs.DirEntries, err error) error { return walk.ListR(f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForDir(func(dir fs.Directory) { entries.ForDir(func(dir fs.Directory) {
if dir != nil { if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
@@ -1042,21 +1054,17 @@ func listToChan(f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers) o := make(fs.ObjectsChan, fs.Config.Checkers)
go func() { go func() {
defer close(o) defer close(o)
_ = walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { err := walk.ListR(f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
if err != nil {
if err == fs.ErrorDirNotFound {
return nil
}
err = errors.Errorf("Failed to list: %v", err)
fs.CountError(err)
fs.Errorf(nil, "%v", err)
return nil
}
entries.ForObject(func(obj fs.Object) { entries.ForObject(func(obj fs.Object) {
o <- obj o <- obj
}) })
return nil return nil
}) })
if err != nil && err != fs.ErrorDirNotFound {
err = errors.Wrap(err, "failed to list")
fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
}() }()
return o return o
} }
@@ -1479,8 +1487,7 @@ type ListFormat struct {
separator string separator string
dirSlash bool dirSlash bool
absolute bool absolute bool
output []func() string output []func(entry *ListJSONItem) string
entry fs.DirEntry
csv *csv.Writer csv *csv.Writer
buf bytes.Buffer buf bytes.Buffer
} }
@@ -1516,76 +1523,91 @@ func (l *ListFormat) SetCSV(useCSV bool) {
} }
// SetOutput sets functions used to create files information // SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func() string) { func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
l.output = output l.output = output
} }
// AddModTime adds file's Mod Time to output // AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() { func (l *ListFormat) AddModTime() {
l.AppendOutput(func() string { return l.entry.ModTime().Local().Format("2006-01-02 15:04:05") }) l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
})
} }
// AddSize adds file's size to output // AddSize adds file's size to output
func (l *ListFormat) AddSize() { func (l *ListFormat) AddSize() {
l.AppendOutput(func() string { l.AppendOutput(func(entry *ListJSONItem) string {
return strconv.FormatInt(l.entry.Size(), 10) return strconv.FormatInt(entry.Size, 10)
}) })
} }
// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
if entry.IsDir && l.dirSlash {
remote += "/"
}
return remote
}
// AddPath adds path to file to output // AddPath adds path to file to output
func (l *ListFormat) AddPath() { func (l *ListFormat) AddPath() {
l.AppendOutput(func() string { l.AppendOutput(func(entry *ListJSONItem) string {
remote := l.entry.Remote() return l.normalisePath(entry, entry.Path)
if l.absolute && !strings.HasPrefix(remote, "/") { })
remote = "/" + remote }
}
_, isDir := l.entry.(fs.Directory) // AddEncrypted adds the encrypted path to file to output
if isDir && l.dirSlash { func (l *ListFormat) AddEncrypted() {
remote += "/" l.AppendOutput(func(entry *ListJSONItem) string {
} return l.normalisePath(entry, entry.Encrypted)
return remote
}) })
} }
// AddHash adds the hash of the type given to the output // AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) { func (l *ListFormat) AddHash(ht hash.Type) {
l.AppendOutput(func() string { hashName := ht.String()
o, ok := l.entry.(fs.Object) l.AppendOutput(func(entry *ListJSONItem) string {
if !ok { if entry.IsDir {
return "" return ""
} }
return hashSum(ht, o) return entry.Hashes[hashName]
}) })
} }
// AddID adds file's ID to the output if known // AddID adds file's ID to the output if known
func (l *ListFormat) AddID() { func (l *ListFormat) AddID() {
l.AppendOutput(func() string { l.AppendOutput(func(entry *ListJSONItem) string {
if do, ok := l.entry.(fs.IDer); ok { return entry.ID
return do.ID() })
} }
return ""
// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.OrigID
}) })
} }
// AddMimeType adds file's MimeType to the output if known // AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() { func (l *ListFormat) AddMimeType() {
l.AppendOutput(func() string { l.AppendOutput(func(entry *ListJSONItem) string {
return fs.MimeTypeDirEntry(l.entry) return entry.MimeType
}) })
} }
// AppendOutput adds string generated by specific function to printed output // AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func() string) { func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
l.output = append(l.output, functionToAppend) l.output = append(l.output, functionToAppend)
} }
// Format prints information about the DirEntry in the format defined // Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry fs.DirEntry) (result string) { func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
l.entry = entry
var out []string var out []string
for _, fun := range l.output { for _, fun := range l.output {
out = append(out, fun()) out = append(out, fun(entry))
} }
if l.csv != nil { if l.csv != nil {
l.buf.Reset() l.buf.Reset()

View File

@@ -39,7 +39,6 @@ import (
"github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter" "github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -232,6 +231,33 @@ func TestHashSums(t *testing.T) {
} }
} }
func TestSuffixName(t *testing.T) {
origSuffix, origKeepExt := fs.Config.Suffix, fs.Config.SuffixKeepExtension
defer func() {
fs.Config.Suffix, fs.Config.SuffixKeepExtension = origSuffix, origKeepExt
}()
for _, test := range []struct {
remote string
suffix string
keepExt bool
want string
}{
{"test.txt", "", false, "test.txt"},
{"test.txt", "", true, "test.txt"},
{"test.txt", "-suffix", false, "test.txt-suffix"},
{"test.txt", "-suffix", true, "test-suffix.txt"},
{"test.txt.csv", "-suffix", false, "test.txt.csv-suffix"},
{"test.txt.csv", "-suffix", true, "test.txt-suffix.csv"},
{"test", "-suffix", false, "test-suffix"},
{"test", "-suffix", true, "test-suffix"},
} {
fs.Config.Suffix = test.suffix
fs.Config.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(test.remote)
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
}
}
func TestCount(t *testing.T) { func TestCount(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
@@ -778,6 +804,7 @@ func TestSame(t *testing.T) {
func TestOverlapping(t *testing.T) { func TestOverlapping(t *testing.T) {
a := &testFsInfo{name: "name", root: "root"} a := &testFsInfo{name: "name", root: "root"}
slash := string(os.PathSeparator) // native path separator
for _, test := range []struct { for _, test := range []struct {
name string name string
root string root string
@@ -790,6 +817,8 @@ func TestOverlapping(t *testing.T) {
{"name", "roo", false}, {"name", "roo", false},
{"name", "root/toot", true}, {"name", "root/toot", true},
{"name", "root/toot/", true}, {"name", "root/toot/", true},
{"name", "root" + slash + "toot", true},
{"name", "root" + slash + "toot" + slash, true},
{"name", "", true}, {"name", "", true},
{"name", "/", true}, {"name", "/", true},
} { } {
@@ -873,61 +902,90 @@ func TestCheckEqualReaders(t *testing.T) {
} }
func TestListFormat(t *testing.T) { func TestListFormat(t *testing.T) {
r := fstest.NewRun(t) item0 := &operations.ListJSONItem{
defer r.Finalise() Path: "a",
file1 := r.WriteObject("a", "a", t1) Name: "a",
file2 := r.WriteObject("subdir/b", "b", t1) Encrypted: "encryptedFileName",
Size: 1,
MimeType: "application/octet-stream",
ModTime: operations.Timestamp{
When: t1,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: false,
Hashes: map[string]string{
"MD5": "0cc175b9c0f1b6a831c399e269772661",
"SHA-1": "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8",
"DropboxHash": "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8",
"QuickXorHash": "6100000000000000000000000100000000000000"},
ID: "fileID",
OrigID: "fileOrigID",
}
fstest.CheckItems(t, r.Fremote, file1, file2) item1 := &operations.ListJSONItem{
Path: "subdir",
Name: "subdir",
Encrypted: "encryptedDirName",
Size: -1,
MimeType: "inode/directory",
ModTime: operations.Timestamp{
When: t2,
Format: "2006-01-02T15:04:05.000000000Z07:00"},
IsDir: true,
Hashes: map[string]string(nil),
ID: "dirID",
OrigID: "dirOrigID",
}
items, _ := list.DirSorted(r.Fremote, true, "")
var list operations.ListFormat var list operations.ListFormat
list.AddPath() list.AddPath()
list.SetDirSlash(false) list.SetDirSlash(false)
assert.Equal(t, "subdir", list.Format(items[1])) assert.Equal(t, "subdir", list.Format(item1))
list.SetDirSlash(true) list.SetDirSlash(true)
assert.Equal(t, "subdir/", list.Format(items[1])) assert.Equal(t, "subdir/", list.Format(item1))
list.SetOutput(nil) list.SetOutput(nil)
assert.Equal(t, "", list.Format(items[1])) assert.Equal(t, "", list.Format(item1))
list.AppendOutput(func() string { return "a" }) list.AppendOutput(func(item *operations.ListJSONItem) string { return "a" })
list.AppendOutput(func() string { return "b" }) list.AppendOutput(func(item *operations.ListJSONItem) string { return "b" })
assert.Equal(t, "ab", list.Format(items[1])) assert.Equal(t, "ab", list.Format(item1))
list.SetSeparator(":::") list.SetSeparator(":::")
assert.Equal(t, "a:::b", list.Format(items[1])) assert.Equal(t, "a:::b", list.Format(item1))
list.SetOutput(nil) list.SetOutput(nil)
list.AddModTime() list.AddModTime()
assert.Equal(t, items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0])) assert.Equal(t, t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
list.SetOutput(nil) list.SetOutput(nil)
list.SetSeparator("|")
list.AddID() list.AddID()
_ = list.Format(items[0]) // Can't really check anything - at least it didn't panic! list.AddOrigID()
assert.Equal(t, "fileID|fileOrigID", list.Format(item0))
assert.Equal(t, "dirID|dirOrigID", list.Format(item1))
list.SetOutput(nil) list.SetOutput(nil)
list.AddMimeType() list.AddMimeType()
assert.Contains(t, list.Format(items[0]), "/") assert.Contains(t, list.Format(item0), "/")
assert.Equal(t, "inode/directory", list.Format(items[1])) assert.Equal(t, "inode/directory", list.Format(item1))
list.SetOutput(nil) list.SetOutput(nil)
list.AddPath() list.AddPath()
list.SetAbsolute(true) list.SetAbsolute(true)
assert.Equal(t, "/a", list.Format(items[0])) assert.Equal(t, "/a", list.Format(item0))
list.SetAbsolute(false) list.SetAbsolute(false)
assert.Equal(t, "a", list.Format(items[0])) assert.Equal(t, "a", list.Format(item0))
list.SetOutput(nil) list.SetOutput(nil)
list.AddSize() list.AddSize()
assert.Equal(t, "1", list.Format(items[0])) assert.Equal(t, "1", list.Format(item0))
list.AddPath() list.AddPath()
list.AddModTime() list.AddModTime()
list.SetDirSlash(true) list.SetDirSlash(true)
list.SetSeparator("__SEP__") list.SetSeparator("__SEP__")
assert.Equal(t, "1__SEP__a__SEP__"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0])) assert.Equal(t, "1__SEP__a__SEP__"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"__SEP__subdir/__SEP__"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1])) assert.Equal(t, "-1__SEP__subdir/__SEP__"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
for _, test := range []struct { for _, test := range []struct {
ht hash.Type ht hash.Type
@@ -939,10 +997,7 @@ func TestListFormat(t *testing.T) {
} { } {
list.SetOutput(nil) list.SetOutput(nil)
list.AddHash(test.ht) list.AddHash(test.ht)
got := list.Format(items[0]) assert.Equal(t, test.want, list.Format(item0))
if got != "UNSUPPORTED" && got != "" {
assert.Equal(t, test.want, got)
}
} }
list.SetOutput(nil) list.SetOutput(nil)
@@ -952,8 +1007,15 @@ func TestListFormat(t *testing.T) {
list.AddPath() list.AddPath()
list.AddModTime() list.AddModTime()
list.SetDirSlash(true) list.SetDirSlash(true)
assert.Equal(t, "1|a|"+items[0].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[0])) assert.Equal(t, "1|a|"+t1.Local().Format("2006-01-02 15:04:05"), list.Format(item0))
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1])) assert.Equal(t, "-1|subdir/|"+t2.Local().Format("2006-01-02 15:04:05"), list.Format(item1))
list.SetOutput(nil)
list.SetSeparator("|")
list.AddPath()
list.AddEncrypted()
assert.Equal(t, "a|encryptedFileName", list.Format(item0))
assert.Equal(t, "subdir/|encryptedDirName/", list.Format(item1))
} }
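Condensed from the test above, the reworked API threads a *ListJSONItem through every formatter rather than stashing a DirEntry on the struct; a sketch:
var l operations.ListFormat
l.SetSeparator("|")
l.SetDirSlash(true)
l.AddSize()
l.AddPath()
fmt.Println(l.Format(item0)) // "1|a"
fmt.Println(l.Format(item1)) // "-1|subdir/"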

View File

@@ -172,7 +172,7 @@ See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) command for
} }
} }
// Mkdir a directory // Run a single command, eg Mkdir
func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) { func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) {
var ( var (
f fs.Fs f fs.Fs
@@ -240,7 +240,7 @@ See the [size command](/commands/rclone_size/) command for more information on t
}) })
} }
// Mkdir a directory // Size a directory
func rcSize(in rc.Params) (out rc.Params, err error) { func rcSize(in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(in) f, err := rc.GetFs(in)
if err != nil { if err != nil {
@@ -255,3 +255,38 @@ func rcSize(in rc.Params) (out rc.Params, err error) {
out["bytes"] = bytes out["bytes"] = bytes
return out, nil return out, nil
} }
func init() {
rc.Add(rc.Call{
Path: "operations/publiclink",
AuthRequired: true,
Fn: rcPublicLink,
Title: "Create or retrieve a public link to the given file or folder.",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
Returns
- url - URL of the resource
See the [link command](/commands/rclone_link/) command for more information on the above.
`,
})
}
// Make a public link
func rcPublicLink(in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(in)
if err != nil {
return nil, err
}
url, err := PublicLink(f, remote)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["url"] = url
return out, nil
}
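Exercised in-process, the new call looks like this; the remote name and path are illustrative:
in := rc.Params{
	"fs":     "drive:",
	"remote": "file.txt",
}
out, err := rcPublicLink(in)
// on success out["url"] carries the link, mirroring `rclone link drive:file.txt`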

View File

@@ -356,3 +356,16 @@ func TestRcSize(t *testing.T) {
"bytes": int64(120), "bytes": int64(120),
}, out) }, out)
} }
// operations/publiclink: Create or retrieve a public link to the given file or folder.
func TestRcPublicLink(t *testing.T) {
r, call := rcNewRun(t, "operations/publiclink")
defer r.Finalise()
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
}
_, err := call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "doesn't support public links")
}

fs/operations/reopen.go (new file)
View File

@@ -0,0 +1,111 @@
package operations
import (
"io"
"sync"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// reOpen is a wrapper for an object reader which reopens the stream on error
type reOpen struct {
mu sync.Mutex // mutex to protect the below
src fs.Object // object to open
hashOption *fs.HashesOption // option to pass to initial open
rc io.ReadCloser // underlying stream
read int64 // number of bytes read from this stream
maxTries int // maximum number of retries
tries int // number of retries we've had so far in this stream
err error // if this is set then Read/Close calls will return it
opened bool // if set then rc is valid and needs closing
}
var (
errorFileClosed = errors.New("file already closed")
errorTooManyTries = errors.New("failed to reopen: too many retries")
)
// newReOpen makes a handle which will reopen itself and seek to where it was on errors
func newReOpen(src fs.Object, hashOption *fs.HashesOption, maxTries int) (rc io.ReadCloser, err error) {
h := &reOpen{
src: src,
hashOption: hashOption,
maxTries: maxTries,
}
h.mu.Lock()
defer h.mu.Unlock()
err = h.open()
if err != nil {
return nil, err
}
return h, nil
}
// open the underlying handle - call with lock held
//
// we don't retry here as the Open() call will itself have low level retries
func (h *reOpen) open() error {
var opts = make([]fs.OpenOption, 1)
if h.read == 0 {
// put hashOption on if reading from the start, ditch otherwise
opts[0] = h.hashOption
} else {
// seek to the read point
opts[0] = &fs.SeekOption{Offset: h.read}
}
h.tries++
if h.tries > h.maxTries {
h.err = errorTooManyTries
} else {
h.rc, h.err = h.src.Open(opts...)
}
if h.err != nil {
if h.tries > 1 {
fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err)
}
return h.err
}
h.opened = true
return nil
}
// Read bytes retrying as necessary
func (h *reOpen) Read(p []byte) (n int, err error) {
h.mu.Lock()
defer h.mu.Unlock()
if h.err != nil {
// return a previous error if there is one
return n, h.err
}
n, err = h.rc.Read(p)
if err != nil {
h.err = err
}
h.read += int64(n)
if err != nil && err != io.EOF {
// close underlying stream
h.opened = false
_ = h.rc.Close()
// reopen stream, clearing error if successful
fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err)
if h.open() == nil {
err = nil
}
}
return n, err
}
// Close the stream
func (h *reOpen) Close() error {
h.mu.Lock()
defer h.mu.Unlock()
if !h.opened {
return errorFileClosed
}
h.opened = false
h.err = errorFileClosed
return h.rc.Close()
}
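The handle reads like any io.ReadCloser; a sketch mirroring the call site in Copy above, where dst is assumed to be an io.Writer:
in, err := newReOpen(src, hashOption, fs.Config.LowLevelRetries)
if err != nil {
	return err
}
defer func() { _ = in.Close() }()
_, err = io.Copy(dst, in) // reads transparently survive up to maxTries reopens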

View File

@@ -0,0 +1,144 @@
package operations
import (
"io"
"io/ioutil"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
// check interface
var _ io.ReadCloser = (*reOpen)(nil)
var errorTestError = errors.New("test error")
// this is a wrapper for a mockobject with a custom Open function
//
// breaks indicate the number of bytes to read before returning an
// error
type reOpenTestObject struct {
fs.Object
breaks []int64
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// This will break after reading the number of bytes in breaks
func (o *reOpenTestObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
rc, err := o.Object.Open(options...)
if err != nil {
return nil, err
}
if len(o.breaks) > 0 {
// Pop a breakpoint off
N := o.breaks[0]
o.breaks = o.breaks[1:]
// If 0 then return an error immediately
if N == 0 {
return nil, errorTestError
}
// Read N bytes then an error
r := io.MultiReader(&io.LimitedReader{R: rc, N: N}, errorReader{errorTestError})
// Wrap with Close in a new readCloser
rc = readCloser{Reader: r, Closer: rc}
}
return rc, nil
}
// Return an error only
type errorReader struct {
err error
}
// Read returning an error
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
// Contents for the mock object
var reOpenTestcontents = []byte("0123456789")
// Start the test with the given breaks
func testReOpen(breaks []int64, maxRetries int) (io.ReadCloser, error) {
srcOrig := mockobject.New("potato").WithContent(reOpenTestcontents, mockobject.SeekModeRegular)
src := &reOpenTestObject{
Object: srcOrig,
breaks: breaks,
}
hashOption := &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)}
return newReOpen(src, hashOption, maxRetries)
}
func TestReOpenBasics(t *testing.T) {
// open
h, err := testReOpen(nil, 10)
assert.NoError(t, err)
// Check contents read correctly
got, err := ioutil.ReadAll(h)
assert.NoError(t, err)
assert.Equal(t, reOpenTestcontents, got)
// Check read after end
var buf = make([]byte, 1)
n, err := h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, io.EOF, err)
// Check close
assert.NoError(t, h.Close())
// Check double close
assert.Equal(t, errorFileClosed, h.Close())
// Check read after close
n, err = h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, errorFileClosed, err)
}
func TestReOpenErrorAtStart(t *testing.T) {
// open with immediate breaking
h, err := testReOpen([]int64{0}, 10)
assert.Equal(t, errorTestError, err)
assert.Nil(t, h)
}
func TestReOpenError(t *testing.T) {
// open with a few break points but less than the max
h, err := testReOpen([]int64{2, 1, 3}, 10)
assert.NoError(t, err)
// check contents
got, err := ioutil.ReadAll(h)
assert.NoError(t, err)
assert.Equal(t, reOpenTestcontents, got)
// check close
assert.NoError(t, h.Close())
}
func TestReOpenFail(t *testing.T) {
// open with a few break points but >= the max
h, err := testReOpen([]int64{2, 1, 3}, 3)
assert.NoError(t, err)
// check contents
got, err := ioutil.ReadAll(h)
assert.Equal(t, errorTestError, err)
assert.Equal(t, reOpenTestcontents[:6], got)
// check old error is returned
var buf = make([]byte, 1)
n, err := h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, errorTooManyTries, err)
// Check close
assert.Equal(t, errorFileClosed, h.Close())
}

View File

@@ -8,13 +8,23 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var optionBlock = map[string]interface{}{} var (
optionBlock = map[string]interface{}{}
optionReload = map[string]func() error{}
)
// AddOption adds an option set // AddOption adds an option set
func AddOption(name string, option interface{}) { func AddOption(name string, option interface{}) {
optionBlock[name] = option optionBlock[name] = option
} }
// AddOptionReload adds an option set with a reload function to be
// called when options are changed
func AddOptionReload(name string, option interface{}, reload func() error) {
optionBlock[name] = option
optionReload[name] = reload
}
func init() { func init() {
Add(Call{ Add(Call{
Path: "options/blocks", Path: "options/blocks",
@@ -103,7 +113,12 @@ func rcOptionsSet(in Params) (out Params, err error) {
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to write options from block %q", name) return nil, errors.Wrapf(err, "failed to write options from block %q", name)
} }
if reload := optionReload[name]; reload != nil {
err = reload()
if err != nil {
return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
}
}
} }
return out, nil return out, nil
} }
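A registration sketch; the block name and options struct are illustrative:
type exampleOptions struct {
	Speed int
}

var exampleOpt = exampleOptions{Speed: 4}

func init() {
	AddOptionReload("example", &exampleOpt, func() error {
		// push the changed exampleOpt values into any running state here
		return nil
	})
}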

View File

@@ -1,8 +1,10 @@
package rc package rc
import ( import (
"fmt"
"testing" "testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -24,9 +26,21 @@ func TestAddOption(t *testing.T) {
assert.Equal(t, len(optionBlock), 0) assert.Equal(t, len(optionBlock), 0)
AddOption("potato", &testOptions) AddOption("potato", &testOptions)
assert.Equal(t, len(optionBlock), 1) assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 0)
assert.Equal(t, &testOptions, optionBlock["potato"]) assert.Equal(t, &testOptions, optionBlock["potato"])
} }
func TestAddOptionReload(t *testing.T) {
defer clearOptionBlock()
assert.Equal(t, len(optionBlock), 0)
reload := func() error { return nil }
AddOptionReload("potato", &testOptions, reload)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 1)
assert.Equal(t, &testOptions, optionBlock["potato"])
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
}
func TestOptionsBlocks(t *testing.T) { func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock() defer clearOptionBlock()
AddOption("potato", &testOptions) AddOption("potato", &testOptions)
@@ -53,7 +67,14 @@ func TestOptionsGet(t *testing.T) {
func TestOptionsSet(t *testing.T) { func TestOptionsSet(t *testing.T) {
defer clearOptionBlock() defer clearOptionBlock()
AddOption("potato", &testOptions) var reloaded int
AddOptionReload("potato", &testOptions, func() error {
if reloaded > 0 {
return errors.New("error while reloading")
}
reloaded++
return nil
})
call := Calls.Get("options/set") call := Calls.Get("options/set")
require.NotNil(t, call) require.NotNil(t, call)
@@ -67,6 +88,12 @@ func TestOptionsSet(t *testing.T) {
require.Nil(t, out) require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int) assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "hello", testOptions.String) assert.Equal(t, "hello", testOptions.String)
assert.Equal(t, 1, reloaded)
// error from reload
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "error while reloading")
// unknown option block // unknown option block
in = Params{ in = Params{
@@ -85,4 +112,5 @@ func TestOptionsSet(t *testing.T) {
_, err = call.Fn(in) _, err = call.Fn(in)
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options") assert.Contains(t, err.Error(), "failed to write options")
} }

View File

@@ -188,8 +188,8 @@ func rcJobStatus(in Params) (out Params, err error) {
defer job.mu.Unlock() defer job.mu.Unlock()
out = make(Params) out = make(Params)
err = Reshape(&out, job) err = Reshape(&out, job)
if job == nil { if err != nil {
return nil, errors.New("Reshape failed in job status") return nil, errors.Wrap(err, "reshape failed in job status")
} }
return out, nil return out, nil
} }

View File

@@ -39,17 +39,21 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
switch name { switch name {
case "sync": case "sync":
return nil, Sync(dstFs, srcFs) return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
case "copy": case "copy":
return nil, CopyDir(dstFs, srcFs) return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
case "move": case "move":
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs") deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
if rc.NotErrParamNotFound(err) { if rc.NotErrParamNotFound(err) {
return nil, err return nil, err
} }
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs) return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
} }
panic("unknown rcSyncCopyMove type") panic("unknown rcSyncCopyMove type")
} }
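With the new parameter an rc client can opt in to empty directory creation; a sketch with illustrative remotes, using the srcFs/dstFs parameter names resolved earlier in this function:
in := rc.Params{
	"srcFs":              "/tmp/src",
	"dstFs":              "remote:backup",
	"createEmptySrcDirs": true,
}
out, err := rcSyncCopyMove(in, "copy")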

View File

@@ -24,6 +24,7 @@ type syncCopyMove struct {
fsrc fs.Fs fsrc fs.Fs
deleteMode fs.DeleteMode // how we are doing deletions deleteMode fs.DeleteMode // how we are doing deletions
DoMove bool DoMove bool
copyEmptySrcDirs bool
deleteEmptySrcDirs bool deleteEmptySrcDirs bool
dir string dir string
// internal state // internal state
@@ -63,12 +64,16 @@ type syncCopyMove struct {
suffix string // suffix to add to files placed in backupDir suffix string // suffix to add to files placed in backupDir
} }
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) { func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping)
}
s := &syncCopyMove{ s := &syncCopyMove{
fdst: fdst, fdst: fdst,
fsrc: fsrc, fsrc: fsrc,
deleteMode: deleteMode, deleteMode: deleteMode,
DoMove: DoMove, DoMove: DoMove,
copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs, deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "", dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers), srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
@@ -221,7 +226,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
} else { } else {
// If destination already exists, then we must move it into --backup-dir if required // If destination already exists, then we must move it into --backup-dir if required
if pair.Dst != nil && s.backupDir != nil { if pair.Dst != nil && s.backupDir != nil {
remoteWithSuffix := pair.Dst.Remote() + s.suffix remoteWithSuffix := operations.SuffixName(pair.Dst.Remote())
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix) overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst) _, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
if err != nil { if err != nil {
@@ -686,7 +691,9 @@ func (s *syncCopyMove) run() error {
s.stopTransfers() s.stopTransfers()
s.stopDeleters() s.stopDeleters()
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs)) if s.copyEmptySrcDirs {
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
}
// Delete files after // Delete files after
if s.deleteMode == fs.DeleteModeAfter { if s.deleteMode == fs.DeleteModeAfter {
@@ -849,7 +856,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied // If DoMove is true then files will be moved instead of copied
// //
// dir is the start directory, "" for root // dir is the start directory, "" for root
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error { func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
if deleteMode != fs.DeleteModeOff && DoMove { if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time")) return fserrors.FatalError(errors.New("can't delete and move at the same time"))
} }
@@ -859,7 +866,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames")) return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
} }
// only delete stuff during in this pass // only delete stuff during in this pass
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs) do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
if err != nil { if err != nil {
return err return err
} }
@@ -870,7 +877,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
// Next pass does a copy only // Next pass does a copy only
deleteMode = fs.DeleteModeOff deleteMode = fs.DeleteModeOff
} }
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs) do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
if err != nil { if err != nil {
return err return err
} }
@@ -878,22 +885,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
} }
// Sync fsrc into fdst // Sync fsrc into fdst
func Sync(fdst, fsrc fs.Fs) error { func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false) return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
} }
// CopyDir copies fsrc into fdst // CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc fs.Fs) error { func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false) return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
} }
// moveDir moves fsrc into fdst // moveDir moves fsrc into fdst
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error { func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs) return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
} }
// MoveDir moves fsrc into fdst // MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error { func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
if operations.Same(fdst, fsrc) { if operations.Same(fdst, fsrc) {
fs.Errorf(fdst, "Nothing to do as source and destination are the same") fs.Errorf(fdst, "Nothing to do as source and destination are the same")
return nil return nil
@@ -920,13 +927,6 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
} }
} }
// The two remotes mustn't overlap if we didn't do server side move
if operations.Overlapping(fdst, fsrc) {
err := fs.ErrorCantMoveOverlapping
fs.Errorf(fdst, "%v", err)
return err
}
// Otherwise move the files one by one // Otherwise move the files one by one
return moveDir(fdst, fsrc, deleteEmptySrcDirs) return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
} }

View File

@@ -11,6 +11,7 @@ import (
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/filter" "github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest" "github.com/ncw/rclone/fstest"
@@ -39,7 +40,7 @@ func TestCopyWithDryRun(t *testing.T) {
r.Mkdir(r.Fremote) r.Mkdir(r.Fremote)
fs.Config.DryRun = true fs.Config.DryRun = true
err := CopyDir(r.Fremote, r.Flocal) err := CopyDir(r.Fremote, r.Flocal, false)
fs.Config.DryRun = false fs.Config.DryRun = false
require.NoError(t, err) require.NoError(t, err)
@@ -54,7 +55,7 @@ func TestCopy(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(r.Fremote) r.Mkdir(r.Fremote)
err := CopyDir(r.Fremote, r.Flocal) err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@@ -71,7 +72,7 @@ func TestCopyNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := CopyDir(r.Fremote, r.Flocal) err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@@ -89,7 +90,7 @@ func TestSyncNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
accounting.Stats.ResetCounters() accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal) err := Sync(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@@ -107,7 +108,7 @@ func TestCopyWithDepth(t *testing.T) {
fs.Config.MaxDepth = 1 fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }() defer func() { fs.Config.MaxDepth = -1 }()
err := CopyDir(r.Fremote, r.Flocal) err := CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -135,7 +136,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
} }
defer unpatch() defer unpatch()
err = CopyDir(r.Fremote, r.Flocal) err = CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
unpatch() unpatch()
@@ -152,7 +153,59 @@ func TestCopyEmptyDirectories(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
r.Mkdir(r.Fremote) r.Mkdir(r.Fremote)
err = CopyDir(r.Fremote, r.Flocal) err = CopyDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test move empty directories
func TestMoveEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = MoveDir(r.Fremote, r.Flocal, false, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test sync empty directories
func TestSyncEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = Sync(r.Fremote, r.Flocal, true)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckListingWithPrecision( fstest.CheckListingWithPrecision(
@@ -181,7 +234,7 @@ func TestServerSideCopy(t *testing.T) {
defer finaliseCopy() defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy) t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
err = CopyDir(FremoteCopy, r.Fremote) err = CopyDir(FremoteCopy, r.Fremote, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, FremoteCopy, file1) fstest.CheckItems(t, FremoteCopy, file1)
@@ -199,7 +252,7 @@ func TestCopyAfterDelete(t *testing.T) {
err := operations.Mkdir(r.Flocal, "") err := operations.Mkdir(r.Flocal, "")
require.NoError(t, err) require.NoError(t, err)
err = CopyDir(r.Fremote, r.Flocal) err = CopyDir(r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Flocal)
@@ -213,7 +266,7 @@ func TestCopyRedownload(t *testing.T) {
file1 := r.WriteObject("sub dir/hello world", "hello world", t1) file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
     fstest.CheckItems(t, r.Fremote, file1)
-    err := CopyDir(r.Flocal, r.Fremote)
+    err := CopyDir(r.Flocal, r.Fremote, false)
     require.NoError(t, err)
     // Test with combined precision of local and remote as we copied it there and back
@@ -233,7 +286,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file1)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly one file.
@@ -245,7 +298,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred no files
@@ -267,7 +320,7 @@ func TestSyncSizeOnly(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file1)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly one file.
@@ -279,7 +332,7 @@ func TestSyncSizeOnly(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred no files
@@ -301,7 +354,7 @@ func TestSyncIgnoreSize(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file1)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly one file.
@@ -313,7 +366,7 @@ func TestSyncIgnoreSize(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred no files
@@ -329,7 +382,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file1)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly 0 files because the
@@ -340,7 +393,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
     defer func() { fs.Config.IgnoreTimes = false }()
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly one file even though the
@@ -360,7 +413,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
     defer func() { fs.Config.IgnoreExisting = false }()
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
     fstest.CheckItems(t, r.Fremote, file1)
@@ -368,7 +421,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
     // Change everything
     r.WriteFile("existing", "newpotatoes", t2)
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // Items should not change
     fstest.CheckItems(t, r.Fremote, file1)
@@ -416,7 +469,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
     accounting.Stats.ResetCounters()
     fs.CountError(nil)
-    assert.NoError(t, Sync(r.Fremote, r.Flocal))
+    assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
     fstest.CheckListingWithPrecision(
         t,
@@ -459,7 +512,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
     defer func() { fs.Config.DryRun = false }()
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
@@ -468,7 +521,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
     fs.Config.DryRun = false
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
@@ -496,7 +549,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file2)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
@@ -517,7 +570,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file2)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
@@ -537,7 +590,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file1)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1, file2)
     fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -552,7 +605,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file2)
     fstest.CheckItems(t, r.Fremote, file2)
@@ -575,7 +628,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file2)
     fstest.CheckItems(t, r.Fremote, file2)
@@ -591,7 +644,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
     fs.Config.DryRun = true
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     fs.Config.DryRun = false
     require.NoError(t, err)
@@ -610,7 +663,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file1, file3)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1, file3)
     fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -656,7 +709,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
     )
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckListingWithPrecision(
@@ -726,7 +779,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
     accounting.Stats.ResetCounters()
     fs.CountError(nil)
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     assert.Equal(t, fs.ErrorNotDeleting, err)
     fstest.CheckListingWithPrecision(
@@ -803,7 +856,7 @@ func TestCopyDeleteBefore(t *testing.T) {
     fstest.CheckItems(t, r.Flocal, file2)
     accounting.Stats.ResetCounters()
-    err := CopyDir(r.Fremote, r.Flocal)
+    err := CopyDir(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -826,14 +879,14 @@ func TestSyncWithExclude(t *testing.T) {
     }()
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Fremote, file2, file1)
     // Now sync the other way round and check enormous doesn't get
     // deleted as it is excluded from the sync
     accounting.Stats.ResetCounters()
-    err = Sync(r.Flocal, r.Fremote)
+    err = Sync(r.Flocal, r.Fremote, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file2, file1, file3)
 }
@@ -856,14 +909,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
     }()
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Fremote, file2)
     // Check sync the other way round to make sure enormous gets
     // deleted even though it is excluded
     accounting.Stats.ResetCounters()
-    err = Sync(r.Flocal, r.Fremote)
+    err = Sync(r.Flocal, r.Fremote, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file2)
 }
@@ -898,7 +951,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
     }()
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
 }
@@ -922,7 +975,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
     f2 := r.WriteFile("yam", "Yam Content", t2)
     accounting.Stats.ResetCounters()
-    require.NoError(t, Sync(r.Fremote, r.Flocal))
+    require.NoError(t, Sync(r.Fremote, r.Flocal, false))
     fstest.CheckItems(t, r.Fremote, f1, f2)
     fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -931,7 +984,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
     f2 = r.RenameFile(f2, "yaml")
     accounting.Stats.ResetCounters()
-    require.NoError(t, Sync(r.Fremote, r.Flocal))
+    require.NoError(t, Sync(r.Fremote, r.Flocal, false))
     fstest.CheckItems(t, r.Fremote, f1, f2)
@@ -968,7 +1021,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
     // Do server side move
     accounting.Stats.ResetCounters()
-    err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
+    err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
     require.NoError(t, err)
     if withFilter {
@@ -995,7 +1048,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
     // Move it back to a new empty remote, dst does not exist this time
     accounting.Stats.ResetCounters()
-    err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
+    err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
     require.NoError(t, err)
     if withFilter {
@@ -1020,7 +1073,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
     r.Mkdir(r.Fremote)
     // run move with --delete-empty-src-dirs
-    err := MoveDir(r.Fremote, r.Flocal, true)
+    err := MoveDir(r.Fremote, r.Flocal, true, false)
     require.NoError(t, err)
     fstest.CheckListingWithPrecision(
@@ -1040,7 +1093,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
     file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
     r.Mkdir(r.Fremote)
-    err := MoveDir(r.Fremote, r.Flocal, false)
+    err := MoveDir(r.Fremote, r.Flocal, false, false)
     require.NoError(t, err)
     fstest.CheckListingWithPrecision(
@@ -1101,20 +1154,41 @@ func TestServerSideMoveOverlap(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file1)
     // Subdir move with no filters should return ErrorCantMoveOverlapping
-    err = MoveDir(FremoteMove, r.Fremote, false)
-    assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
+    err = MoveDir(FremoteMove, r.Fremote, false, false)
+    assert.EqualError(t, err, fs.ErrorOverlapping.Error())
     // Now try with a filter which should also fail with ErrorCantMoveOverlapping
     filter.Active.Opt.MinSize = 40
     defer func() {
         filter.Active.Opt.MinSize = -1
     }()
-    err = MoveDir(FremoteMove, r.Fremote, false)
-    assert.EqualError(t, err, fs.ErrorCantMoveOverlapping.Error())
+    err = MoveDir(FremoteMove, r.Fremote, false, false)
+    assert.EqualError(t, err, fs.ErrorOverlapping.Error())
+}
+
+// Test a sync with overlap
+func TestSyncOverlap(t *testing.T) {
+    r := fstest.NewRun(t)
+    defer r.Finalise()
+
+    subRemoteName := r.FremoteName + "/rclone-sync-test"
+    FremoteSync, err := fs.NewFs(subRemoteName)
+    require.NoError(t, err)
+
+    checkErr := func(err error) {
+        require.Error(t, err)
+        assert.True(t, fserrors.IsFatalError(err))
+        assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
+    }
+
+    checkErr(Sync(FremoteSync, r.Fremote, false))
+    checkErr(Sync(r.Fremote, FremoteSync, false))
+    checkErr(Sync(r.Fremote, r.Fremote, false))
+    checkErr(Sync(FremoteSync, FremoteSync, false))
 }
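
Overlapping source and destination are now rejected up front as a fatal error. A minimal sketch of what a caller of the new code sees (the remotes are illustrative, taken from the test above):

    // FremoteSync lives inside r.Fremote, so the two trees overlap
    err := Sync(FremoteSync, r.Fremote, false)
    // err.Error() == fs.ErrorOverlapping.Error(), and fserrors.IsFatalError(err) == true
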
 // Test with BackupDir set
-func testSyncBackupDir(t *testing.T, suffix string) {
+func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
     r := fstest.NewRun(t)
     defer r.Finalise()
@@ -1125,16 +1199,18 @@ func testSyncBackupDir(t *testing.T, suffix string) {
     fs.Config.BackupDir = r.FremoteName + "/backup"
     fs.Config.Suffix = suffix
+    fs.Config.SuffixKeepExtension = suffixKeepExtension
     defer func() {
         fs.Config.BackupDir = ""
         fs.Config.Suffix = ""
+        fs.Config.SuffixKeepExtension = false
     }()
     // Make the setup so we have one, two, three in the dest
     // and one (different), two (same) in the source
     file1 := r.WriteObject("dst/one", "one", t1)
     file2 := r.WriteObject("dst/two", "two", t1)
-    file3 := r.WriteObject("dst/three", "three", t1)
+    file3 := r.WriteObject("dst/three.txt", "three", t1)
     file2a := r.WriteFile("two", "two", t1)
     file1a := r.WriteFile("one", "oneA", t2)
@@ -1145,7 +1221,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
     require.NoError(t, err)
     accounting.Stats.ResetCounters()
-    err = Sync(fdst, r.Flocal)
+    err = Sync(fdst, r.Flocal, false)
     require.NoError(t, err)
     // one should be moved to the backup dir and the new one installed
@@ -1153,20 +1229,24 @@ func testSyncBackupDir(t *testing.T, suffix string) {
     file1a.Path = "dst/one"
     // two should be unchanged
     // three should be moved to the backup dir
-    file3.Path = "backup/three" + suffix
+    if suffixKeepExtension {
+        file3.Path = "backup/three" + suffix + ".txt"
+    } else {
+        file3.Path = "backup/three.txt" + suffix
+    }
     fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)
     // Now check what happens if we do it again
     // Restore a different three and update one in the source
-    file3a := r.WriteObject("dst/three", "threeA", t2)
+    file3a := r.WriteObject("dst/three.txt", "threeA", t2)
     file1b := r.WriteFile("one", "oneBB", t3)
     fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a, file3a)
     // This should delete three and overwrite one again, checking
     // the files got overwritten correctly in backup-dir
     accounting.Stats.ResetCounters()
-    err = Sync(fdst, r.Flocal)
+    err = Sync(fdst, r.Flocal, false)
     require.NoError(t, err)
     // one should be moved to the backup dir and the new one installed
@@ -1174,12 +1254,17 @@ func testSyncBackupDir(t *testing.T, suffix string) {
     file1b.Path = "dst/one"
     // two should be unchanged
     // three should be moved to the backup dir
-    file3a.Path = "backup/three" + suffix
+    if suffixKeepExtension {
+        file3a.Path = "backup/three" + suffix + ".txt"
+    } else {
+        file3a.Path = "backup/three.txt" + suffix
+    }
     fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
 }
-func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "") }
-func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak") }
+func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "", false) }
+func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
+func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) { testSyncBackupDir(t, "-2019-01-01", true) }
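
A worked example of the two suffix modes exercised above, following the assertions in the test with suffix "-2019-01-01":

    // SuffixKeepExtension = false: the suffix is appended after the extension
    //   "dst/three.txt" -> "backup/three.txt-2019-01-01"
    // SuffixKeepExtension = true: the suffix is inserted before the extension
    //   "dst/three.txt" -> "backup/three-2019-01-01.txt"
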
 // Check we can sync two files with differing UTF-8 representations
 func TestSyncUTFNorm(t *testing.T) {
@@ -1203,7 +1288,7 @@ func TestSyncUTFNorm(t *testing.T) {
     fstest.CheckItems(t, r.Fremote, file2)
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     // We should have transferred exactly one file, but kept the
@@ -1229,7 +1314,7 @@ func TestSyncImmutable(t *testing.T) {
     // Should succeed
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     require.NoError(t, err)
     fstest.CheckItems(t, r.Flocal, file1)
     fstest.CheckItems(t, r.Fremote, file1)
@@ -1241,7 +1326,7 @@ func TestSyncImmutable(t *testing.T) {
     // Should fail with ErrorImmutableModified and not modify local or remote files
     accounting.Stats.ResetCounters()
-    err = Sync(r.Fremote, r.Flocal)
+    err = Sync(r.Fremote, r.Flocal, false)
     assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
     fstest.CheckItems(t, r.Flocal, file2)
     fstest.CheckItems(t, r.Fremote, file1)
@@ -1277,6 +1362,6 @@ func TestAbort(t *testing.T) {
     accounting.Stats.ResetCounters()
-    err := Sync(r.Fremote, r.Flocal)
+    err := Sync(r.Fremote, r.Flocal, false)
     assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
 }
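
Every Sync, CopyDir and MoveDir call site in this file gains a trailing bool, always passed as false in the tests. Only the argument is visible in this diff, so the parameter names below are assumptions (presumably a new copy-empty-source-dirs switch); a sketch of the signatures as inferred from the call sites:

    // Hypothetical signatures inferred from the call sites above
    func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error
    func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error
    func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs, copyEmptySrcDirs bool) error
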


@@ -1,4 +1,4 @@
 package fs
 
 // Version of rclone
-var Version = "v1.46"
+var Version = "v1.46-DEV"


@@ -51,7 +51,7 @@ type Func func(path string, entries fs.DirEntries, err error) error
 //
 // Parent directories are always listed before their children
 //
-// This is implemented by WalkR if Config.UseRecursiveListing is true
+// This is implemented by WalkR if Config.UseListR is true
 // and f supports it and level > 1, or WalkN otherwise.
 //
 // If --files-from is set then a DirTree will be constructed with just
@@ -62,12 +62,265 @@ func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
     if filter.Active.HaveFilesFrom() {
         return walkR(f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(f.NewObject))
     }
+    // FIXME should this just be maxLevel < 0 - why the maxLevel > 1
     if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
         return walkListR(f, path, includeAll, maxLevel, fn)
     }
     return walkListDirSorted(f, path, includeAll, maxLevel, fn)
 }
+
+// ListType is used to choose which combination of files or directories is required
+type ListType byte
+
+// Types of listing for ListR
+const (
+    ListObjects ListType = 1 << iota // list objects only
+    ListDirs                         // list dirs only
+    ListAll = ListObjects | ListDirs // list files and dirs
+)
+
+// Objects returns true if the list type specifies objects
+func (l ListType) Objects() bool {
+    return (l & ListObjects) != 0
+}
+
+// Dirs returns true if the list type specifies dirs
+func (l ListType) Dirs() bool {
+    return (l & ListDirs) != 0
+}
+
+// Filter in place to only contain the type of list entry required
+func (l ListType) Filter(in *fs.DirEntries) {
+    if l == ListAll {
+        return
+    }
+    out := (*in)[:0]
+    for _, entry := range *in {
+        switch entry.(type) {
+        case fs.Object:
+            if l.Objects() {
+                out = append(out, entry)
+            }
+        case fs.Directory:
+            if l.Dirs() {
+                out = append(out, entry)
+            }
+        default:
+            fs.Errorf(nil, "Unknown object type %T", entry)
+        }
+    }
+    *in = out
+}
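
A quick sketch of how the ListType flags compose, reusing the mockobject/mockdir test helpers exercised in the tests further down:

    entries := fs.DirEntries{mockobject.Object("a"), mockdir.New("dir")}
    ListObjects.Filter(&entries) // entries now holds only the object "a"
    // ListAll.Objects() and ListAll.Dirs() are both true, so ListAll.Filter is a no-op
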
+
+// ListR lists the directory recursively.
+//
+// If includeAll is not set it will use the filters defined.
+//
+// If maxLevel is < 0 then it will recurse indefinitely, else it will
+// only do maxLevel levels.
+//
+// If synthesizeDirs is set then for bucket based remotes it will
+// synthesize directories from the file structure. This uses extra
+// memory so don't set this if you don't need directories; conversely,
+// do set it if you are interested in directories.
+//
+// It calls fn for each tranche of DirEntries read. Note that these
+// don't necessarily represent a directory.
+//
+// Note that fn will not be called concurrently whereas the directory
+// listing will proceed concurrently.
+//
+// Directories are not listed in any particular order so you can't
+// rely on parents coming before children or alphabetical ordering.
+//
+// This is implemented by using ListR on the backend if possible and
+// efficient, otherwise by Walk.
+//
+// NB (f, path) to be replaced by fs.Dir at some point
+func ListR(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
+    // FIXME disable this with --no-fast-list ??? `--disable ListR` will do it...
+    doListR := f.Features().ListR
+
+    // Can't use ListR if...
+    if doListR == nil || // ...no ListR
+        filter.Active.HaveFilesFrom() || // ...using --files-from
+        maxLevel >= 0 || // ...using bounded recursion
+        len(filter.Active.Opt.ExcludeFile) > 0 || // ...using --exclude-file
+        filter.Active.BoundedRecursion() { // ...filters imply bounded recursion
+        return listRwalk(f, path, includeAll, maxLevel, listType, fn)
+    }
+    return listR(f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased)
+}
+
+// listRwalk walks the file tree for ListR using Walk
+func listRwalk(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
+    var listErr error
+    walkErr := Walk(f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error {
+        // Carry on listing but return the error at the end
+        if err != nil {
+            listErr = err
+            fs.CountError(err)
+            fs.Errorf(path, "error listing: %v", err)
+            return nil
+        }
+        listType.Filter(&entries)
+        return fn(entries)
+    })
+    if listErr != nil {
+        return listErr
+    }
+    return walkErr
+}
+
+// dirMap keeps track of directories made for bucket based remotes.
+// true => directory has been sent
+// false => directory has been seen but not sent
+type dirMap struct {
+    mu   sync.Mutex
+    m    map[string]bool
+    root string
+}
+
+// make a new dirMap
+func newDirMap(root string) *dirMap {
+    return &dirMap{
+        m:    make(map[string]bool),
+        root: root,
+    }
+}
+
+// add adds a directory and parents with sent
+func (dm *dirMap) add(dir string, sent bool) {
+    for {
+        if dir == dm.root || dir == "" {
+            return
+        }
+        currentSent, found := dm.m[dir]
+        if found {
+            // If it has been sent already then nothing more to do
+            if currentSent {
+                return
+            }
+            // If not sent already don't override
+            if !sent {
+                return
+            }
+            // currentSent == false && sent == true so needs overriding
+        }
+        dm.m[dir] = sent
+        // Add parents in as unsent
+        dir = parentDir(dir)
+        sent = false
+    }
+}
+
+// add all the directories in entries and their parents to the dirMap
+func (dm *dirMap) addEntries(entries fs.DirEntries) error {
+    dm.mu.Lock()
+    defer dm.mu.Unlock()
+    for _, entry := range entries {
+        switch x := entry.(type) {
+        case fs.Object:
+            dm.add(parentDir(x.Remote()), false)
+        case fs.Directory:
+            dm.add(x.Remote(), true)
+        default:
+            return errors.Errorf("unknown object type %T", entry)
+        }
+    }
+    return nil
+}
+
+// send any missing parents to fn
+func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) {
+    // Count the strings first so we allocate the minimum memory
+    n := 0
+    for _, sent := range dm.m {
+        if !sent {
+            n++
+        }
+    }
+    if n == 0 {
+        return nil
+    }
+    dirs := make([]string, 0, n)
+    // Fill the dirs up then sort it
+    for dir, sent := range dm.m {
+        if !sent {
+            dirs = append(dirs, dir)
+        }
+    }
+    sort.Strings(dirs)
+    // Now convert to bulkier Dir in batches and send
+    now := time.Now()
+    list := NewListRHelper(fn)
+    for _, dir := range dirs {
+        err = list.Add(fs.NewDir(dir, now))
+        if err != nil {
+            return err
+        }
+    }
+    return list.Flush()
+}
+
+// listR walks the file tree using ListR
+func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error {
+    includeDirectory := filter.Active.IncludeDirectory(f)
+    if !includeAll {
+        includeAll = filter.Active.InActive()
+    }
+    var dm *dirMap
+    if synthesizeDirs {
+        dm = newDirMap(path)
+    }
+    var mu sync.Mutex
+    err := doListR(path, func(entries fs.DirEntries) (err error) {
+        if synthesizeDirs {
+            err = dm.addEntries(entries)
+            if err != nil {
+                return err
+            }
+        }
+        listType.Filter(&entries)
+        if !includeAll {
+            filteredEntries := entries[:0]
+            for _, entry := range entries {
+                var include bool
+                switch x := entry.(type) {
+                case fs.Object:
+                    include = filter.Active.IncludeObject(x)
+                case fs.Directory:
+                    include, err = includeDirectory(x.Remote())
+                    if err != nil {
+                        return err
+                    }
+                default:
+                    return errors.Errorf("unknown object type %T", entry)
+                }
+                if include {
+                    filteredEntries = append(filteredEntries, entry)
+                } else {
+                    fs.Debugf(entry, "Excluded from sync (and deletion)")
+                }
+            }
+            entries = filteredEntries
+        }
+        mu.Lock()
+        defer mu.Unlock()
+        return fn(entries)
+    })
+    if err != nil {
+        return err
+    }
+    if synthesizeDirs {
+        err = dm.sendEntries(fn)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
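
On a bucket based remote the listing may contain few or no directory markers; with synthesizeDirs set the missing parents are derived from the object paths. Illustratively, matching the bucket based case in TestListR below:

    // Entries from the backend: a, b, dir/a, dir/b, dir/subdir/c, dir/subdir
    // Entries delivered to fn:  a, b, dir/a, dir/b, dir/subdir/c, dir/subdir, dir
    // "dir" is synthesized because it was only ever seen as a parent of other entries
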
 // walkListDirSorted lists the directory.
 //
 // It implements Walk using non recursive directory listing.
@@ -506,12 +759,9 @@ func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR f
     return nil
 }
 
-// GetAll runs Walk getting all the results
+// GetAll runs ListR getting all the results
 func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
-    err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
-        if err != nil {
-            return err
-        }
+    err = ListR(f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error {
         for _, entry := range entries {
             switch x := entry.(type) {
             case fs.Object:


@@ -2,12 +2,15 @@ package walk
 import (
     "fmt"
+    "io"
+    "strings"
     "sync"
     "testing"
 
     "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fs/filter"
     "github.com/ncw/rclone/fstest/mockdir"
+    "github.com/ncw/rclone/fstest/mockfs"
     "github.com/ncw/rclone/fstest/mockobject"
     "github.com/pkg/errors"
     "github.com/stretchr/testify/assert"
@@ -634,3 +637,307 @@ b/c/d/
     // Set to default value, to avoid side effects
     filter.Active.Opt.ExcludeFile = ""
 }
+
+func TestListType(t *testing.T) {
+    assert.Equal(t, true, ListObjects.Objects())
+    assert.Equal(t, false, ListObjects.Dirs())
+    assert.Equal(t, false, ListDirs.Objects())
+    assert.Equal(t, true, ListDirs.Dirs())
+    assert.Equal(t, true, ListAll.Objects())
+    assert.Equal(t, true, ListAll.Dirs())
+
+    var (
+        a           = mockobject.Object("a")
+        b           = mockobject.Object("b")
+        dir         = mockdir.New("dir")
+        adir        = mockobject.Object("dir/a")
+        dir2        = mockdir.New("dir2")
+        origEntries = fs.DirEntries{
+            a, b, dir, adir, dir2,
+        }
+        dirEntries = fs.DirEntries{
+            dir, dir2,
+        }
+        objEntries = fs.DirEntries{
+            a, b, adir,
+        }
+    )
+    copyOrigEntries := func() (out fs.DirEntries) {
+        out = make(fs.DirEntries, len(origEntries))
+        copy(out, origEntries)
+        return out
+    }
+
+    got := copyOrigEntries()
+    ListAll.Filter(&got)
+    assert.Equal(t, origEntries, got)
+
+    got = copyOrigEntries()
+    ListObjects.Filter(&got)
+    assert.Equal(t, objEntries, got)
+
+    got = copyOrigEntries()
+    ListDirs.Filter(&got)
+    assert.Equal(t, dirEntries, got)
+}
+
+func TestListR(t *testing.T) {
+    objects := fs.DirEntries{
+        mockobject.Object("a"),
+        mockobject.Object("b"),
+        mockdir.New("dir"),
+        mockobject.Object("dir/a"),
+        mockobject.Object("dir/b"),
+        mockobject.Object("dir/c"),
+    }
+    f := mockfs.NewFs("mock", "/")
+    var got []string
+    clearCallback := func() {
+        got = nil
+    }
+    callback := func(entries fs.DirEntries) error {
+        for _, entry := range entries {
+            got = append(got, entry.Remote())
+        }
+        return nil
+    }
+    doListR := func(dir string, callback fs.ListRCallback) error {
+        var os fs.DirEntries
+        for _, o := range objects {
+            if dir == "" || strings.HasPrefix(o.Remote(), dir+"/") {
+                os = append(os, o)
+            }
+        }
+        return callback(os)
+    }
+
+    // Setup filter
+    oldFilter := filter.Active
+    defer func() {
+        filter.Active = oldFilter
+    }()
+    var err error
+    filter.Active, err = filter.NewFilter(nil)
+    require.NoError(t, err)
+    require.NoError(t, filter.Active.AddRule("+ b"))
+    require.NoError(t, filter.Active.AddRule("- *"))
+
+    // Base case
+    clearCallback()
+    err = listR(f, "", true, ListAll, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"a", "b", "dir", "dir/a", "dir/b", "dir/c"}, got)
+
+    // Base case - with Objects
+    clearCallback()
+    err = listR(f, "", true, ListObjects, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/c"}, got)
+
+    // Base case - with Dirs
+    clearCallback()
+    err = listR(f, "", true, ListDirs, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir"}, got)
+
+    // With filter
+    clearCallback()
+    err = listR(f, "", false, ListAll, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"b", "dir", "dir/b"}, got)
+
+    // With filter - with Objects
+    clearCallback()
+    err = listR(f, "", false, ListObjects, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"b", "dir/b"}, got)
+
+    // With filter - with Dir
+    clearCallback()
+    err = listR(f, "", false, ListDirs, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir"}, got)
+
+    // With filter and subdir
+    clearCallback()
+    err = listR(f, "dir", false, ListAll, callback, doListR, false)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir/b"}, got)
+
+    // Now bucket based
+    objects = fs.DirEntries{
+        mockobject.Object("a"),
+        mockobject.Object("b"),
+        mockobject.Object("dir/a"),
+        mockobject.Object("dir/b"),
+        mockobject.Object("dir/subdir/c"),
+        mockdir.New("dir/subdir"),
+    }
+
+    // Base case
+    clearCallback()
+    err = listR(f, "", true, ListAll, callback, doListR, true)
+    require.NoError(t, err)
+    require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/subdir/c", "dir/subdir", "dir"}, got)
+
+    // With filter
+    clearCallback()
+    err = listR(f, "", false, ListAll, callback, doListR, true)
+    require.NoError(t, err)
+    require.Equal(t, []string{"b", "dir/b", "dir/subdir", "dir"}, got)
+
+    // With filter and subdir
+    clearCallback()
+    err = listR(f, "dir", false, ListAll, callback, doListR, true)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir/b", "dir/subdir"}, got)
+
+    // With filter and subdir - with Objects
+    clearCallback()
+    err = listR(f, "dir", false, ListObjects, callback, doListR, true)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir/b"}, got)
+
+    // With filter and subdir - with Dirs
+    clearCallback()
+    err = listR(f, "dir", false, ListDirs, callback, doListR, true)
+    require.NoError(t, err)
+    require.Equal(t, []string{"dir/subdir"}, got)
+}
+
+func TestDirMapAdd(t *testing.T) {
+    type add struct {
+        dir  string
+        sent bool
+    }
+    for i, test := range []struct {
+        root string
+        in   []add
+        want map[string]bool
+    }{
+        {
+            root: "",
+            in: []add{
+                {"", true},
+            },
+            want: map[string]bool{},
+        },
+        {
+            root: "",
+            in: []add{
+                {"a/b/c", true},
+            },
+            want: map[string]bool{
+                "a/b/c": true,
+                "a/b":   false,
+                "a":     false,
+            },
+        },
+        {
+            root: "",
+            in: []add{
+                {"a/b/c", true},
+                {"a/b", true},
+            },
+            want: map[string]bool{
+                "a/b/c": true,
+                "a/b":   true,
+                "a":     false,
+            },
+        },
+        {
+            root: "",
+            in: []add{
+                {"a/b", true},
+                {"a/b/c", false},
+            },
+            want: map[string]bool{
+                "a/b/c": false,
+                "a/b":   true,
+                "a":     false,
+            },
+        },
+        {
+            root: "root",
+            in: []add{
+                {"root/a/b", true},
+                {"root/a/b/c", false},
+            },
+            want: map[string]bool{
+                "root/a/b/c": false,
+                "root/a/b":   true,
+                "root/a":     false,
+            },
+        },
+    } {
+        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+            dm := newDirMap(test.root)
+            for _, item := range test.in {
+                dm.add(item.dir, item.sent)
+            }
+            assert.Equal(t, test.want, dm.m)
+        })
+    }
+}
+
+func TestDirMapAddEntries(t *testing.T) {
+    dm := newDirMap("")
+    entries := fs.DirEntries{
+        mockobject.Object("dir/a"),
+        mockobject.Object("dir/b"),
+        mockdir.New("dir"),
+        mockobject.Object("dir2/a"),
+        mockobject.Object("dir2/b"),
+    }
+    require.NoError(t, dm.addEntries(entries))
+    assert.Equal(t, map[string]bool{"dir": true, "dir2": false}, dm.m)
+}
+
+func TestDirMapSendEntries(t *testing.T) {
+    var got []string
+    clearCallback := func() {
+        got = nil
+    }
+    callback := func(entries fs.DirEntries) error {
+        for _, entry := range entries {
+            got = append(got, entry.Remote())
+        }
+        return nil
+    }
+
+    // general test
+    dm := newDirMap("")
+    entries := fs.DirEntries{
+        mockobject.Object("dir/a"),
+        mockobject.Object("dir/b"),
+        mockdir.New("dir"),
+        mockobject.Object("dir2/a"),
+        mockobject.Object("dir2/b"),
+        mockobject.Object("dir1/a"),
+        mockobject.Object("dir3/b"),
+    }
+    require.NoError(t, dm.addEntries(entries))
+    clearCallback()
+    err := dm.sendEntries(callback)
+    require.NoError(t, err)
+    assert.Equal(t, []string{
+        "dir1",
+        "dir2",
+        "dir3",
+    }, got)
+
+    // return error from callback
+    callback2 := func(entries fs.DirEntries) error {
+        return io.EOF
+    }
+    err = dm.sendEntries(callback2)
+    require.Equal(t, io.EOF, err)
+
+    // empty
+    dm = newDirMap("")
+    clearCallback()
+    err = dm.sendEntries(callback)
+    require.NoError(t, err)
+    assert.Equal(t, []string(nil), got)
+}


@@ -468,11 +468,8 @@ func Purge(f fs.Fs) {
     }
     if doFallbackPurge {
         dirs := []string{""}
-        err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
-            if err != nil {
-                log.Printf("purge walk returned error: %v", err)
-                return nil
-            }
+        err = walk.ListR(f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
+            var err error
             entries.ForObject(func(obj fs.Object) {
                 fs.Debugf(f, "Purge object %q", obj.Remote())
                 err = obj.Remove()


@@ -782,6 +782,39 @@ func Run(t *testing.T, opt *Opt) {
         TestFsListDirFile2(t)
     })
 
+    // Test the files are all there with walk.ListR recursive listings
+    t.Run("FsListR", func(t *testing.T) {
+        skipIfNotOk(t)
+        objs, dirs, err := walk.GetAll(remote, "", true, -1)
+        require.NoError(t, err)
+        assert.Equal(t, []string{
+            `hello? sausage`,
+            `hello? sausage/êé`,
+            `hello? sausage/êé/Hello, 世界`,
+            `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠`,
+        }, dirsToNames(dirs))
+        assert.Equal(t, []string{
+            `file name.txt`,
+            `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
+        }, objsToNames(objs))
+    })
+
+    // Test the files are all there with
+    // walk.ListR recursive listings on a sub dir
+    t.Run("FsListRSubdir", func(t *testing.T) {
+        skipIfNotOk(t)
+        objs, dirs, err := walk.GetAll(remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
+        require.NoError(t, err)
+        assert.Equal(t, []string{
+            `hello? sausage/êé`,
+            `hello? sausage/êé/Hello, 世界`,
+            `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠`,
+        }, dirsToNames(dirs))
+        assert.Equal(t, []string{
+            `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
+        }, objsToNames(objs))
+    })
+
     // TestFsListDirRoot tests that DirList works in the root
     TestFsListDirRoot := func(t *testing.T) {
         skipIfNotOk(t)


@@ -139,13 +139,7 @@ func newRunIndividual(t *testing.T, individual bool) *Run {
     *r = *oneRun
     r.cleanRemote = func() {
         var toDelete []string
-        err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
-            if err != nil {
-                if err == fs.ErrorDirNotFound {
-                    return nil
-                }
-                t.Fatalf("Error listing: %v", err)
-            }
+        err := walk.ListR(r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
             for _, entry := range entries {
                 switch x := entry.(type) {
                 case fs.Object:
