mirror of
https://github.com/rclone/rclone.git
synced 2026-02-02 01:33:24 +00:00
Compare commits
32 Commits
fix-111-me
...
v1.54-stab
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0012b981c1 | ||
|
|
707cdaa604 | ||
|
|
e2531e08be | ||
|
|
86babc6393 | ||
|
|
d45d48cbe5 | ||
|
|
7ad2c22d5b | ||
|
|
0cfcc08be1 | ||
|
|
2c4a25de5b | ||
|
|
f5a95b2ad0 | ||
|
|
f2caa0eabb | ||
|
|
4943a5028c | ||
|
|
60bebe4b35 | ||
|
|
61031cfdea | ||
|
|
da7e4379fa | ||
|
|
7e7a91ce3d | ||
|
|
6baa4e2947 | ||
|
|
3f53283ebf | ||
|
|
da9dd543e4 | ||
|
|
e3cf4f82eb | ||
|
|
406e26c7b7 | ||
|
|
f4214882ab | ||
|
|
231ab31d2a | ||
|
|
f76bc86cc8 | ||
|
|
2d11f5672d | ||
|
|
cf0563f99e | ||
|
|
65f691f4de | ||
|
|
f627d42a51 | ||
|
|
f08e43fb77 | ||
|
|
cd7611e7ce | ||
|
|
42f28f9458 | ||
|
|
92046b457f | ||
|
|
098de1cff5 |
34
.github/workflows/build.yml
vendored
34
.github/workflows/build.yml
vendored
@@ -19,12 +19,12 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
|
||||
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.12', 'go1.13', 'go1.14', 'go1.16']
|
||||
|
||||
include:
|
||||
- job_name: linux
|
||||
os: ubuntu-latest
|
||||
go: '1.16.x'
|
||||
go: '1.15.x'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^linux/"'
|
||||
check: true
|
||||
@@ -32,25 +32,18 @@ jobs:
|
||||
racequicktest: true
|
||||
deploy: true
|
||||
|
||||
- job_name: mac_amd64
|
||||
- job_name: mac
|
||||
os: macOS-latest
|
||||
go: '1.16.x'
|
||||
go: '1.15.x'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/amd64" -cgo'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
deploy: true
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macOS-latest
|
||||
go: '1.16.x'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows_amd64
|
||||
os: windows-latest
|
||||
go: '1.16.x'
|
||||
go: '1.15.x'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^windows/amd64" -cgo'
|
||||
build_args: '-buildmode exe'
|
||||
@@ -60,7 +53,7 @@ jobs:
|
||||
|
||||
- job_name: windows_386
|
||||
os: windows-latest
|
||||
go: '1.16.x'
|
||||
go: '1.15.x'
|
||||
gotags: cmount
|
||||
goarch: '386'
|
||||
cgo: '1'
|
||||
@@ -71,11 +64,16 @@ jobs:
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.16.x'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
go: '1.15.x'
|
||||
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.12
|
||||
os: ubuntu-latest
|
||||
go: '1.12.x'
|
||||
quicktest: true
|
||||
|
||||
- job_name: go1.13
|
||||
os: ubuntu-latest
|
||||
go: '1.13.x'
|
||||
@@ -87,9 +85,9 @@ jobs:
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.15
|
||||
- job_name: go1.16
|
||||
os: ubuntu-latest
|
||||
go: '1.15.x'
|
||||
go: '1.16.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
@@ -131,7 +129,7 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
brew update
|
||||
brew install --cask macfuse
|
||||
brew install --cask osxfuse
|
||||
if: matrix.os == 'macOS-latest'
|
||||
|
||||
- name: Install Libraries on Windows
|
||||
|
||||
@@ -72,7 +72,7 @@ Make sure you
|
||||
|
||||
When you are done with that
|
||||
|
||||
git push -u origin my-new-feature
|
||||
git push origin my-new-feature
|
||||
|
||||
Go to the GitHub website and click [Create pull
|
||||
request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
@@ -99,7 +99,7 @@ rclone's tests are run from the go testing framework, so at the top
|
||||
level you can run this to run all the tests.
|
||||
|
||||
go test -v ./...
|
||||
|
||||
|
||||
rclone contains a mixture of unit tests and integration tests.
|
||||
Because it is difficult (and in some respects pointless) to test cloud
|
||||
storage systems by mocking all their interfaces, rclone unit tests can
|
||||
@@ -115,8 +115,8 @@ are skipped if `TestDrive:` isn't defined.
|
||||
cd backend/drive
|
||||
go test -v
|
||||
|
||||
You can then run the integration tests which test all of rclone's
|
||||
operations. Normally these get run against the local file system,
|
||||
You can then run the integration tests which tests all of rclone's
|
||||
operations. Normally these get run against the local filing system,
|
||||
but they can be run against any of the remotes.
|
||||
|
||||
cd fs/sync
|
||||
@@ -127,7 +127,7 @@ but they can be run against any of the remotes.
|
||||
go test -v -remote TestDrive:
|
||||
|
||||
If you want to use the integration test framework to run these tests
|
||||
altogether with an HTML report and test retries then from the
|
||||
all together with an HTML report and test retries then from the
|
||||
project root:
|
||||
|
||||
go install github.com/rclone/rclone/fstest/test_all
|
||||
@@ -202,7 +202,7 @@ for the flag help, the remainder is shown to the user in `rclone
|
||||
config` and is added to the docs with `make backenddocs`.
|
||||
|
||||
The only documentation you need to edit are the `docs/content/*.md`
|
||||
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
||||
files. The MANUAL.*, rclone.1, web site, etc. are all auto generated
|
||||
from those during the release process. See the `make doc` and `make
|
||||
website` targets in the Makefile if you are interested in how. You
|
||||
don't need to run these when adding a feature.
|
||||
@@ -265,7 +265,7 @@ rclone uses the [go
|
||||
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
||||
support in go1.11 and later to manage its dependencies.
|
||||
|
||||
rclone can be built with modules outside of the `GOPATH`.
|
||||
rclone can be built with modules outside of the GOPATH
|
||||
|
||||
To add a dependency `github.com/ncw/new_dependency` see the
|
||||
instructions below. These will fetch the dependency and add it to
|
||||
@@ -333,8 +333,8 @@ Getting going
|
||||
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||
* `rclone purge -v TestRemote:rclone-info`
|
||||
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||
* open `remote.csv` in a spreadsheet and examine
|
||||
|
||||
Unit tests
|
||||
@@ -400,7 +400,7 @@ Usage
|
||||
- If this variable doesn't exist, plugin support is disabled.
|
||||
- Plugins must be compiled against the exact version of rclone to work.
|
||||
(The rclone used during building the plugin must be the same as the source of rclone)
|
||||
|
||||
|
||||
Building
|
||||
|
||||
To turn your existing additions into a Go plugin, move them to an external repository
|
||||
|
||||
@@ -16,8 +16,6 @@ RUN apk --no-cache add ca-certificates fuse tzdata && \
|
||||
|
||||
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
||||
|
||||
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
|
||||
|
||||
ENTRYPOINT [ "rclone" ]
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
302
MANUAL.html
generated
302
MANUAL.html
generated
@@ -17,7 +17,7 @@
|
||||
<header id="title-block-header">
|
||||
<h1 class="title">rclone(1) User Manual</h1>
|
||||
<p class="author">Nick Craig-Wood</p>
|
||||
<p class="date">Feb 02, 2021</p>
|
||||
<p class="date">Mar 08, 2021</p>
|
||||
</header>
|
||||
<h1 id="rclone-syncs-your-files-to-cloud-storage">Rclone syncs your files to cloud storage</h1>
|
||||
<p><img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" ></p>
|
||||
@@ -1475,7 +1475,7 @@ rclone mount remote:path/to/files * --volname \\cloud\remote</code></pre>
|
||||
<p>The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2, Hubic) do not support the concept of empty directories, so empty directories will have a tendency to disappear once they fall out of the directory cache.</p>
|
||||
<p>Only supported on Linux, FreeBSD, OS X and Windows at the moment.</p>
|
||||
<h2 id="rclone-mount-vs-rclone-synccopy">rclone mount vs rclone sync/copy</h2>
|
||||
<p>File systems expect things to be 100% reliable, whereas cloud storage systems are a long way from 100% reliable. The rclone sync/copy commands cope with this with lots of retries. However rclone mount can't use retries in the same way without making local copies of the uploads. Look at the <a href="#vfs-file-caching">VFS file caching</a> section for solutions to make mount more reliable.</p>
|
||||
<p>File systems expect things to be 100% reliable, whereas cloud storage systems are a long way from 100% reliable. The rclone sync/copy commands cope with this with lots of retries. However rclone mount can't use retries in the same way without making local copies of the uploads. Look at the <a href="#vfs-file-caching">VFS File Caching</a> for solutions to make mount more reliable.</p>
|
||||
<h2 id="attribute-caching">Attribute caching</h2>
|
||||
<p>You can use the flag <code>--attr-timeout</code> to set the time the kernel caches the attributes (size, modification time, etc.) for directory entries.</p>
|
||||
<p>The default is <code>1s</code> which caches files just long enough to avoid too many callbacks to rclone from the kernel.</p>
|
||||
@@ -1526,6 +1526,7 @@ rclone mount remote:path/to/files * --volname \\cloud\remote</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -1848,6 +1849,7 @@ ffmpeg - | rclone rcat remote:path/to/file</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off-1">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -1982,6 +1984,7 @@ ffmpeg - | rclone rcat remote:path/to/file</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off-2">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -2246,6 +2249,7 @@ htpasswd -B htpasswd anotherUser</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off-3">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -2558,6 +2562,7 @@ htpasswd -B htpasswd anotherUser</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off-4">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -2823,6 +2828,7 @@ htpasswd -B htpasswd anotherUser</code></pre>
|
||||
<p>The cache has 4 different modes selected by <code>--vfs-cache-mode</code>. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.</p>
|
||||
<p>Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back second. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.</p>
|
||||
<p>If using <code>--vfs-cache-max-size</code> note that the cache may exceed this size for two reasons. Firstly because it is only checked every <code>--vfs-cache-poll-interval</code>. Secondly because open files cannot be evicted from the cache.</p>
|
||||
<p>You <strong>should not</strong> run two copies of rclone using the same VFS cache with the same or overlapping remotes if using <code>--vfs-cache-mode > off</code>. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with <code>--cache-dir</code>. You don't need to worry about this if the remotes in use don't overlap.</p>
|
||||
<h3 id="vfs-cache-mode-off-5">--vfs-cache-mode off</h3>
|
||||
<p>In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.</p>
|
||||
<p>This will mean some operations are not possible</p>
|
||||
@@ -4085,7 +4091,7 @@ dir1/dir2/dir3/.ignore</code></pre>
|
||||
<p>The command <code>rclone ls --exclude-if-present .ignore dir1</code> does not list <code>dir3</code>, <code>file3</code> or <code>.ignore</code>.</p>
|
||||
<p><code>--exclude-if-present</code> can only be used once in an rclone command.</p>
|
||||
<h2 id="common-pitfalls">Common pitfalls</h2>
|
||||
<p>The most frequent filter support issues on the <a href="https://https://forum.rclone.org/">rclone forum</a> are:</p>
|
||||
<p>The most frequent filter support issues on the <a href="https://forum.rclone.org/">rclone forum</a> are:</p>
|
||||
<ul>
|
||||
<li>Not using paths relative to the root of the remote</li>
|
||||
<li>Not using <code>/</code> to match from the root of a remote</li>
|
||||
@@ -4997,6 +5003,7 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
|
||||
<ul>
|
||||
<li>srcFs - a remote name string e.g. "drive:src" for the source</li>
|
||||
<li>dstFs - a remote name string e.g. "drive:dst" for the destination</li>
|
||||
<li>createEmptySrcDirs - create empty src directories on destination if set</li>
|
||||
</ul>
|
||||
<p>See the <a href="https://rclone.org/commands/rclone_copy/">copy command</a> command for more information on the above.</p>
|
||||
<p><strong>Authentication is required for this call.</strong></p>
|
||||
@@ -5005,6 +5012,7 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
|
||||
<ul>
|
||||
<li>srcFs - a remote name string e.g. "drive:src" for the source</li>
|
||||
<li>dstFs - a remote name string e.g. "drive:dst" for the destination</li>
|
||||
<li>createEmptySrcDirs - create empty src directories on destination if set</li>
|
||||
<li>deleteEmptySrcDirs - delete empty src directories if set</li>
|
||||
</ul>
|
||||
<p>See the <a href="https://rclone.org/commands/rclone_move/">move command</a> command for more information on the above.</p>
|
||||
@@ -5014,6 +5022,7 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
|
||||
<ul>
|
||||
<li>srcFs - a remote name string e.g. "drive:src" for the source</li>
|
||||
<li>dstFs - a remote name string e.g. "drive:dst" for the destination</li>
|
||||
<li>createEmptySrcDirs - create empty src directories on destination if set</li>
|
||||
</ul>
|
||||
<p>See the <a href="https://rclone.org/commands/rclone_sync/">sync command</a> command for more information on the above.</p>
|
||||
<p><strong>Authentication is required for this call.</strong></p>
|
||||
@@ -6491,7 +6500,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
|
||||
--use-json-log Use json log format.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.1")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)</code></pre>
|
||||
<h2 id="backend-flags">Backend Flags</h2>
|
||||
<p>These flags are available for every command. They control the backends and may be set in the config file.</p>
|
||||
@@ -6616,7 +6625,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
|
||||
--drive-starred-only Only show files that are starred.
|
||||
--drive-stop-on-download-limit Make download limit errors be fatal
|
||||
--drive-stop-on-upload-limit Make upload limit errors be fatal
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-team-drive string ID of the Shared Drive (Team Drive)
|
||||
--drive-token string OAuth Access Token as a JSON blob.
|
||||
--drive-token-url string Token server url.
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
@@ -6897,8 +6906,13 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
|
||||
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
|
||||
--yandex-token string OAuth Access Token as a JSON blob.
|
||||
--yandex-token-url string Token server url.
|
||||
--zoho-auth-url string Auth server URL.
|
||||
--zoho-client-id string OAuth Client Id
|
||||
--zoho-client-secret string OAuth Client Secret
|
||||
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.</code></pre>
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
--zoho-token string OAuth Access Token as a JSON blob.
|
||||
--zoho-token-url string Token server url.</code></pre>
|
||||
<h2 id="fichier">1Fichier</h2>
|
||||
<p>This is a backend for the <a href="https://1fichier.com">1fichier</a> cloud storage service. Note that a Premium subscription is required to use the API.</p>
|
||||
<p>Paths are specified as <code>remote:path</code></p>
|
||||
@@ -7575,6 +7589,10 @@ y/e/d> </code></pre>
|
||||
<pre><code>rclone copy --min-age 24h --no-traverse /path/to/source s3:bucket</code></pre>
|
||||
<p>You'd then do a full <code>rclone sync</code> less often.</p>
|
||||
<p>Note that <code>--fast-list</code> isn't required in the top-up sync.</p>
|
||||
<h4 id="avoiding-head-requests-after-put">Avoiding HEAD requests after PUT</h4>
|
||||
<p>By default rclone will HEAD every object it uploads. It does this to check the object got uploaded correctly.</p>
|
||||
<p>You can disable this with the <a href="#s3-no-head">--s3-no-head</a> option - see there for more details.</p>
|
||||
<p>Setting this flag increases the chance for undetected upload failures.</p>
|
||||
<h3 id="hashes">Hashes</h3>
|
||||
<p>For small objects which weren't uploaded as multipart uploads (objects sized below <code>--s3-upload-cutoff</code> if uploaded with rclone) rclone uses the <code>ETag:</code> header as an MD5 checksum.</p>
|
||||
<p>However for objects which were uploaded as multipart uploads or with server side encryption (SSE-AWS or SSE-C) the <code>ETag</code> header is no longer the MD5 sum of the data, so rclone adds an additional piece of metadata <code>X-Amz-Meta-Md5chksum</code> which is a base64 encoded MD5 hash (in the same format as is required for <code>Content-MD5</code>).</p>
|
||||
@@ -10735,7 +10753,7 @@ chunk_total_size = 10G</code></pre>
|
||||
<h5 id="certificate-validation">Certificate Validation</h5>
|
||||
<p>When the Plex server is configured to only accept secure connections, it is possible to use <code>.plex.direct</code> URLs to ensure certificate validation succeeds. These URLs are used by Plex internally to connect to the Plex server securely.</p>
|
||||
<p>The format for these URLs is the following:</p>
|
||||
<p>https://ip-with-dots-replaced.server-hash.plex.direct:32400/</p>
|
||||
<p><code>https://ip-with-dots-replaced.server-hash.plex.direct:32400/</code></p>
|
||||
<p>The <code>ip-with-dots-replaced</code> part can be any IPv4 address, where the dots have been replaced with dashes, e.g. <code>127.0.0.1</code> becomes <code>127-0-0-1</code>.</p>
|
||||
<p>To get the <code>server-hash</code> part, the easiest way is to visit</p>
|
||||
<p>https://plex.tv/api/resources?includeHttps=1&X-Plex-Token=your-plex-token</p>
|
||||
@@ -11491,7 +11509,7 @@ y/e/d> y</code></pre>
|
||||
<p>To use <code>crypt</code>, first set up the underlying remote. Follow the <code>rclone config</code> instructions for the specific backend.</p>
|
||||
<p>Before configuring the crypt remote, check the underlying remote is working. In this example the underlying remote is called <code>remote</code>. We will configure a path <code>path</code> within this remote to contain the encrypted content. Anything inside <code>remote:path</code> will be encrypted and anything outside will not.</p>
|
||||
<p>Configure <code>crypt</code> using <code>rclone config</code>. In this example the <code>crypt</code> remote is called <code>secret</code>, to differentiate it from the underlying <code>remote</code>.</p>
|
||||
<p>When you are done you can use the crypt remote named <code>secret</code> just as you would with any other remote, e.g. <code>rclone copy D:\docs secret:\docs</code>, and rclone will encrypt and decrypt as needed on the fly. If you access the wrapped remote <code>remote:path</code> directly you will bypass the encryption, and anything you read will be in encrypted form, and anything you write will be undencrypted. To avoid issues it is best to configure a dedicated path for encrypted content, and access it exclusively through a crypt remote.</p>
|
||||
<p>When you are done you can use the crypt remote named <code>secret</code> just as you would with any other remote, e.g. <code>rclone copy D:\docs secret:\docs</code>, and rclone will encrypt and decrypt as needed on the fly. If you access the wrapped remote <code>remote:path</code> directly you will bypass the encryption, and anything you read will be in encrypted form, and anything you write will be unencrypted. To avoid issues it is best to configure a dedicated path for encrypted content, and access it exclusively through a crypt remote.</p>
|
||||
<pre><code>No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -12125,6 +12143,9 @@ y/e/d> y</code></pre>
|
||||
</ul>
|
||||
<h4 id="dropbox-impersonate">--dropbox-impersonate</h4>
|
||||
<p>Impersonate this user when using a business account.</p>
|
||||
<p>Note that if you want to use impersonate, you should make sure this flag is set when running "rclone config" as this will cause rclone to request the "members.read" scope which it won't normally. This is needed to lookup a members email address into the internal ID that dropbox uses in the API.</p>
|
||||
<p>Using the "members.read" scope will require a Dropbox Team Admin to approve during the OAuth flow.</p>
|
||||
<p>You will have to use your own App (setting your own client_id and client_secret) to use this option as currently rclone's default set of permissions doesn't include "members.read". This can be added once v1.55 or later is in use everywhere.</p>
|
||||
<ul>
|
||||
<li>Config: impersonate</li>
|
||||
<li>Env Var: RCLONE_DROPBOX_IMPERSONATE</li>
|
||||
@@ -12428,7 +12449,7 @@ y/e/d> y</code></pre>
|
||||
<h3 id="example-without-a-config-file">Example without a config file</h3>
|
||||
<pre><code>rclone lsf :ftp: --ftp-host=speedtest.tele2.net --ftp-user=anonymous --ftp-pass=`rclone obscure dummy`</code></pre>
|
||||
<h3 id="implicit-tls">Implicit TLS</h3>
|
||||
<p>Rlone FTP supports implicit FTP over TLS servers (FTPS). This has to be enabled in the FTP backend config for the remote, or with <code>[--ftp-tls]{#ftp-tls}</code>. The default FTPS port is <code>990</code>, not <code>21</code> and can be set with <code>[--ftp-port]{#ftp-port}</code>.</p>
|
||||
<p>Rlone FTP supports implicit FTP over TLS servers (FTPS). This has to be enabled in the FTP backend config for the remote, or with <a href="#ftp-tls"><code>--ftp-tls</code></a>. The default FTPS port is <code>990</code>, not <code>21</code> and can be set with <a href="#ftp-port"><code>--ftp-port</code></a>.</p>
|
||||
<h3 id="standard-options-13">Standard Options</h3>
|
||||
<p>Here are the standard options specific to ftp (FTP Connection).</p>
|
||||
<h4 id="ftp-host">--ftp-host</h4>
|
||||
@@ -13132,7 +13153,7 @@ If your browser doesn't open automatically go to the following link: http://
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
Configure this as a team drive?
|
||||
Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> n
|
||||
@@ -13233,15 +13254,15 @@ y/n> # Auto config, y
|
||||
</ul></li>
|
||||
</ul>
|
||||
<p>Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using <code>--drive-impersonate</code>, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1 - use rclone without specifying the <code>--drive-impersonate</code> option, like this: <code>rclone -v foo@example.com lsf gdrive:backup</code></p>
|
||||
<h3 id="team-drives">Team drives</h3>
|
||||
<p>If you want to configure the remote to point to a Google Team Drive then answer <code>y</code> to the question <code>Configure this as a team drive?</code>.</p>
|
||||
<p>This will fetch the list of Team Drives from google and allow you to configure which one you want to use. You can also type in a team drive ID if you prefer.</p>
|
||||
<h3 id="shared-drives-team-drives">Shared drives (team drives)</h3>
|
||||
<p>If you want to configure the remote to point to a Google Shared Drive (previously known as Team Drives) then answer <code>y</code> to the question <code>Configure this as a Shared Drive (Team Drive)?</code>.</p>
|
||||
<p>This will fetch the list of Shared Drives from google and allow you to configure which one you want to use. You can also type in a Shared Drive ID if you prefer.</p>
|
||||
<p>For example:</p>
|
||||
<pre><code>Configure this as a team drive?
|
||||
<pre><code>Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Fetching team drive list...
|
||||
Fetching Shared Drive list...
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Rclone Test
|
||||
\ "xxxxxxxxxxxxxxxxxxxx"
|
||||
@@ -13249,7 +13270,7 @@ Choose a number from below, or type in your own value
|
||||
\ "yyyyyyyyyyyyyyyyyyyy"
|
||||
3 / Rclone Test 3
|
||||
\ "zzzzzzzzzzzzzzzzzzzz"
|
||||
Enter a Team Drive ID> 1
|
||||
Enter a Shared Drive ID> 1
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
@@ -13659,7 +13680,7 @@ trashed=false and 'c' in parents</code></pre>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="drive-team-drive">--drive-team-drive</h4>
|
||||
<p>ID of the Team Drive</p>
|
||||
<p>ID of the Shared Drive (Team Drive)</p>
|
||||
<ul>
|
||||
<li>Config: team_drive</li>
|
||||
<li>Env Var: RCLONE_DRIVE_TEAM_DRIVE</li>
|
||||
@@ -13972,9 +13993,9 @@ rclone backend shortcut drive: source_item -o target=drive2: destination_shortcu
|
||||
<li>"target": optional target remote for the shortcut destination</li>
|
||||
</ul>
|
||||
<h4 id="drives">drives</h4>
|
||||
<p>List the shared drives available to this account</p>
|
||||
<p>List the Shared Drives available to this account</p>
|
||||
<pre><code>rclone backend drives remote: [options] [<arguments>+]</code></pre>
|
||||
<p>This command lists the shared drives (teamdrives) available to this account.</p>
|
||||
<p>This command lists the Shared Drives (Team Drives) available to this account.</p>
|
||||
<p>Usage:</p>
|
||||
<pre><code>rclone backend drives drive:</code></pre>
|
||||
<p>This will return a JSON list of objects like this</p>
|
||||
@@ -18173,7 +18194,7 @@ known_hosts_file = ~/.ssh/known_hosts</code></pre>
|
||||
<p>SFTP also supports <code>about</code> if the same login has shell access and <code>df</code> are in the remote's PATH. <code>about</code> will return the total space, free space, and used space on the remote for the disk of the specified path on the remote or, if not set, the disk of the root on the remote. <code>about</code> will fail if it does not have shell access or if <code>df</code> is not in the remote's PATH.</p>
|
||||
<p>Note that some SFTP servers (e.g. Synology) the paths are different for SSH and SFTP so the hashes can't be calculated properly. For them using <code>disable_hashcheck</code> is a good idea.</p>
|
||||
<p>The only ssh agent supported under Windows is Putty's pageant.</p>
|
||||
<p>The Go SSH library disables the use of the aes128-cbc cipher by default, due to security concerns. This can be re-enabled on a per-connection basis by setting the <code>use_insecure_cipher</code> setting in the configuration file to <code>true</code>. Further details on the insecurity of this cipher can be found [in this paper] (http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf).</p>
|
||||
<p>The Go SSH library disables the use of the aes128-cbc cipher by default, due to security concerns. This can be re-enabled on a per-connection basis by setting the <code>use_insecure_cipher</code> setting in the configuration file to <code>true</code>. Further details on the insecurity of this cipher can be found <a href="http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf">in this paper</a>.</p>
|
||||
<p>SFTP isn't supported under plan9 until <a href="https://github.com/pkg/sftp/issues/156">this issue</a> is fixed.</p>
|
||||
<p>Note that since SFTP isn't HTTP based the following flags don't work with it: <code>--dump-headers</code>, <code>--dump-bodies</code>, <code>--dump-auth</code></p>
|
||||
<p>Note that <code>--timeout</code> isn't supported (but <code>--contimeout</code> is).</p>
|
||||
@@ -19267,6 +19288,22 @@ y/e/d> </code></pre>
|
||||
<p>Only control characters and invalid UTF-8 are replaced. In addition most Unicode full-width characters are not supported at all and will be removed from filenames during upload.</p>
|
||||
<h3 id="standard-options-37">Standard Options</h3>
|
||||
<p>Here are the standard options specific to zoho (Zoho).</p>
|
||||
<h4 id="zoho-client-id">--zoho-client-id</h4>
|
||||
<p>OAuth Client Id Leave blank normally.</p>
|
||||
<ul>
|
||||
<li>Config: client_id</li>
|
||||
<li>Env Var: RCLONE_ZOHO_CLIENT_ID</li>
|
||||
<li>Type: string</li>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="zoho-client-secret">--zoho-client-secret</h4>
|
||||
<p>OAuth Client Secret Leave blank normally.</p>
|
||||
<ul>
|
||||
<li>Config: client_secret</li>
|
||||
<li>Env Var: RCLONE_ZOHO_CLIENT_SECRET</li>
|
||||
<li>Type: string</li>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="zoho-region">--zoho-region</h4>
|
||||
<p>Zoho region to connect to. You'll have to use the region you organization is registered in.</p>
|
||||
<ul>
|
||||
@@ -19296,6 +19333,30 @@ y/e/d> </code></pre>
|
||||
</ul>
|
||||
<h3 id="advanced-options-36">Advanced Options</h3>
|
||||
<p>Here are the advanced options specific to zoho (Zoho).</p>
|
||||
<h4 id="zoho-token">--zoho-token</h4>
|
||||
<p>OAuth Access Token as a JSON blob.</p>
|
||||
<ul>
|
||||
<li>Config: token</li>
|
||||
<li>Env Var: RCLONE_ZOHO_TOKEN</li>
|
||||
<li>Type: string</li>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="zoho-auth-url">--zoho-auth-url</h4>
|
||||
<p>Auth server URL. Leave blank to use the provider defaults.</p>
|
||||
<ul>
|
||||
<li>Config: auth_url</li>
|
||||
<li>Env Var: RCLONE_ZOHO_AUTH_URL</li>
|
||||
<li>Type: string</li>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="zoho-token-url">--zoho-token-url</h4>
|
||||
<p>Token server url. Leave blank to use the provider defaults.</p>
|
||||
<ul>
|
||||
<li>Config: token_url</li>
|
||||
<li>Env Var: RCLONE_ZOHO_TOKEN_URL</li>
|
||||
<li>Type: string</li>
|
||||
<li>Default: ""</li>
|
||||
</ul>
|
||||
<h4 id="zoho-encoding">--zoho-encoding</h4>
|
||||
<p>This sets the encoding for the backend.</p>
|
||||
<p>See: the <a href="https://rclone.org/overview/#encoding">encoding section in the overview</a> for more info.</p>
|
||||
@@ -19824,12 +19885,68 @@ $ tree /tmp/b
|
||||
<li>"error": return an error based on option value</li>
|
||||
</ul>
|
||||
<h1 id="changelog">Changelog</h1>
|
||||
<h2 id="v1.54.1---2021-03-08">v1.54.1 - 2021-03-08</h2>
|
||||
<p><a href="https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1">See commits</a></p>
|
||||
<ul>
|
||||
<li>Bug Fixes
|
||||
<ul>
|
||||
<li>accounting: Fix --bwlimit when up or down is off (Nick Craig-Wood)</li>
|
||||
<li>docs
|
||||
<ul>
|
||||
<li>Fix nesting of brackets and backticks in ftp docs (edwardxml)</li>
|
||||
<li>Fix broken link in sftp page (edwardxml)</li>
|
||||
<li>Fix typo in crypt.md (Romeo Kienzler)</li>
|
||||
<li>Changelog: Correct link to digitalis.io (Alex JOST)</li>
|
||||
<li>Replace #file-caching with #vfs-file-caching (Miron Veryanskiy)</li>
|
||||
<li>Convert bogus example link to code (edwardxml)</li>
|
||||
<li>Remove dead link from rc.md (edwardxml)</li>
|
||||
</ul></li>
|
||||
<li>rc: Sync,copy,move: document createEmptySrcDirs parameter (Nick Craig-Wood)</li>
|
||||
<li>lsjson: Fix unterminated JSON in the presence of errors (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Mount
|
||||
<ul>
|
||||
<li>Fix mount dropping on macOS by setting --daemon-timeout 10m (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>VFS
|
||||
<ul>
|
||||
<li>Document simultaneous usage with the same cache shouldn't be used (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>B2
|
||||
<ul>
|
||||
<li>Automatically raise upload cutoff to avoid spurious error (Nick Craig-Wood)</li>
|
||||
<li>Fix failed to create file system with application key limited to a prefix (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Drive
|
||||
<ul>
|
||||
<li>Refer to Shared Drives instead of Team Drives (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Dropbox
|
||||
<ul>
|
||||
<li>Add scopes to oauth request and optionally "members.read" (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>S3
|
||||
<ul>
|
||||
<li>Fix failed to create file system with folder level permissions policy (Nick Craig-Wood)</li>
|
||||
<li>Fix Wasabi HEAD requests returning stale data by using only 1 transport (Nick Craig-Wood)</li>
|
||||
<li>Fix shared_credentials_file auth (Dmitry Chepurovskiy)</li>
|
||||
<li>Add --s3-no-head to reducing costs docs (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Union
|
||||
<ul>
|
||||
<li>Fix mkdir at root with remote:/ (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Zoho
|
||||
<ul>
|
||||
<li>Fix custom client id's (buengese)</li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
<h2 id="v1.54.0---2021-02-02">v1.54.0 - 2021-02-02</h2>
|
||||
<p><a href="https://github.com/rclone/rclone/compare/v1.53.0...v1.54.0">See commits</a></p>
|
||||
<ul>
|
||||
<li>New backends
|
||||
<ul>
|
||||
<li>Compression remote (experimental)(buengese)</li>
|
||||
<li>Compression remote (experimental) (buengese)</li>
|
||||
<li>Enterprise File Fabric (Nick Craig-Wood)
|
||||
<ul>
|
||||
<li>This work was sponsored by <a href="https://storagemadeeasy.com/">Storage Made Easy</a></li>
|
||||
@@ -19842,8 +19959,8 @@ $ tree /tmp/b
|
||||
<li>Deglobalise the config (Nick Craig-Wood)
|
||||
<ul>
|
||||
<li>Global config now read from the context</li>
|
||||
<li>Global config can be passed into the rc</li>
|
||||
<li>This work was sponsored by <a href="digitalis.io">Digitalis</a></li>
|
||||
<li>This will enable passing of global config via the rc</li>
|
||||
<li>This work was sponsored by <a href="https://digitalis.io/">Digitalis</a></li>
|
||||
</ul></li>
|
||||
<li>Add <code>--bwlimit</code> for upload and download (Nick Craig-Wood)
|
||||
<ul>
|
||||
@@ -19851,48 +19968,38 @@ $ tree /tmp/b
|
||||
</ul></li>
|
||||
<li>Enhance systemd integration (Hekmon)
|
||||
<ul>
|
||||
<li>log level identification</li>
|
||||
<li>manual activation with flag</li>
|
||||
<li>automatic systemd launch detection</li>
|
||||
<li>log level identification, manual activation with flag, automatic systemd launch detection</li>
|
||||
<li>Don't compile systemd log integration for non unix systems (Benjamin Gustin)</li>
|
||||
</ul></li>
|
||||
<li>Add a download flag to hashsum and related commands to force rclone to download and hash files locally (lostheli)</li>
|
||||
<li>Add a <code>--download</code> flag to md5sum/sha1sum/hashsum to force rclone to download and hash files locally (lostheli)</li>
|
||||
<li>Add <code>--progress-terminal-title</code> to print ETA to terminal title (LaSombra)</li>
|
||||
<li>Make backend env vars show in help as the defaults for backend flags (Nick Craig-Wood)</li>
|
||||
<li>build
|
||||
<ul>
|
||||
<li>Raise minimum go version to go1.12 (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>check
|
||||
<ul>
|
||||
<li>Make the error count match up in the log message (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>cmd
|
||||
<ul>
|
||||
<li>Add --progress-terminal-title to print ETA to terminal title (LaSombra)</li>
|
||||
<li>Make backend env vars show in help as the defaults for backend flags (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>dedupe
|
||||
<ul>
|
||||
<li>Add --by-hash to dedupe on hash not file name (Nick Craig-Wood)</li>
|
||||
<li>Add --dedupe-mode list to just list dupes, changing nothing (Nick Craig-Wood)</li>
|
||||
<li>Add <code>--by-hash</code> to dedupe on content hash not file name (Nick Craig-Wood)</li>
|
||||
<li>Add <code>--dedupe-mode list</code> to just list dupes, changing nothing (Nick Craig-Wood)</li>
|
||||
<li>Add warning if used on a remote which can't have duplicate names (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>flags: Improve error message when reading environment vars (Nick Craig-Wood)</li>
|
||||
<li>fs
|
||||
<ul>
|
||||
<li>Add Shutdown optional method for backends (Nick Craig-Wood)</li>
|
||||
<li>When using --files-from check files concurrently (zhucan)</li>
|
||||
<li>Accumulate stats when using --dry-run (Ingo Weiss)</li>
|
||||
<li>Always show stats when using --dry-run or --interactive (Nick Craig-Wood)</li>
|
||||
<li>Add support for flag --no-console on windows to hide the console window (albertony)</li>
|
||||
<li>When using <code>--files-from</code> check files concurrently (zhucan)</li>
|
||||
<li>Accumulate stats when using <code>--dry-run</code> (Ingo Weiss)</li>
|
||||
<li>Always show stats when using <code>--dry-run</code> or <code>--interactive</code> (Nick Craig-Wood)</li>
|
||||
<li>Add support for flag <code>--no-console</code> on windows to hide the console window (albertony)</li>
|
||||
</ul></li>
|
||||
<li>genautocomplete: Add support to output to stdout (Ingo)</li>
|
||||
<li>ncdu
|
||||
<ul>
|
||||
<li>Highlight read errors instead of aborting (Claudio Bantaloukas)</li>
|
||||
<li>Add sort by average size in directory (Adam Plánský)</li>
|
||||
<li>Add toggle option for average size in directory - key 'a' (Adam Plánský)</li>
|
||||
<li>Add toggle option for average s3ize in directory - key 'a' (Adam Plánský)</li>
|
||||
<li>Add empty folder flag into ncdu browser (Adam Plánský)</li>
|
||||
<li>Add ! (errror) and . (unreadable) file flags to go with e (empty) (Nick Craig-Wood)</li>
|
||||
<li>Add <code>!</code> (errror) and <code>.</code> (unreadable) file flags to go with <code>e</code> (empty) (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>obscure: Make <code>rclone osbcure -</code> ignore newline at end of line (Nick Craig-Wood)</li>
|
||||
<li>operations
|
||||
@@ -19919,36 +20026,32 @@ $ tree /tmp/b
|
||||
</ul></li>
|
||||
<li>Bug Fixes
|
||||
<ul>
|
||||
<li>build
|
||||
<ul>
|
||||
<li>Explicitly set ARM version to fix build (Nick Craig-Wood)</li>
|
||||
<li>Don't explicitly set ARM version to fix ARMv5 build (Nick Craig-Wood)</li>
|
||||
<li>Fix nfpm install (Nick Craig-Wood)</li>
|
||||
<li>Fix docker build by upgrading ilteoood/docker_buildx (Nick Craig-Wood)</li>
|
||||
<li>Temporary fix for Windows build errors (Ivan Andreev)</li>
|
||||
</ul></li>
|
||||
<li>fs
|
||||
<ul>
|
||||
<li>Fix nil pointer on copy & move operations directly to remote (Anagh Kumar Baranwal)</li>
|
||||
<li>Fix parsing of .. when joining remotes (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>log: Fix enabling systemd logging when using --log-file (Nick Craig-Wood)</li>
|
||||
<li>move: Fix data loss when moving the same object (Nick Craig-Wood)</li>
|
||||
<li>log: Fix enabling systemd logging when using <code>--log-file</code> (Nick Craig-Wood)</li>
|
||||
<li>check
|
||||
<ul>
|
||||
<li>Make the error count match up in the log message (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>move: Fix data loss when source and destination are the same object (Nick Craig-Wood)</li>
|
||||
<li>operations
|
||||
<ul>
|
||||
<li>Fix --cutof-mode hard not cutting off immediately (Nick Craig-Wood)</li>
|
||||
<li>Fix --immutable error message (Nick Craig-Wood)</li>
|
||||
<li>Fix <code>--cutof-mode</code> hard not cutting off immediately (Nick Craig-Wood)</li>
|
||||
<li>Fix <code>--immutable</code> error message (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>sync
|
||||
<ul>
|
||||
<li>Fix --cutoff-mode soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)</li>
|
||||
<li>Fix --immutable errors retrying many times (Nick Craig-Wood)</li>
|
||||
<li>Fix <code>--cutoff-mode</code> soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)</li>
|
||||
<li>Fix <code>--immutable</code> errors retrying many times (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
</ul></li>
|
||||
<li>Docs
|
||||
<ul>
|
||||
<li>Many fixes and a rewrite of the filtering docs (edwardxml)</li>
|
||||
<li>Many spelling and grammar problems (Josh Soref)</li>
|
||||
<li>Many spelling and grammar fixes (Josh Soref)</li>
|
||||
<li>Doc fixes for commands delete, purge, rmdir, rmdirs and mount (albertony)</li>
|
||||
<li>And thanks to these people for many doc fixes too numerous to list
|
||||
<ul>
|
||||
@@ -19963,13 +20066,11 @@ $ tree /tmp/b
|
||||
<li>Update systemd status with cache stats (Hekmon)</li>
|
||||
<li>Disable bazil/fuse based mount on macOS (Nick Craig-Wood)
|
||||
<ul>
|
||||
<li>Make mount be cmount under macOS (Nick Craig-Wood)</li>
|
||||
<li>Make <code>rclone mount</code> actually run <code>rclone cmount</code> under macOS (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Implement mknod to make NFS file creation work (Nick Craig-Wood)</li>
|
||||
<li>Make sure we don't call umount more than once (Nick Craig-Wood)</li>
|
||||
<li>Don't call host.Umount if a signal has been received (Nick Craig-Wood)</li>
|
||||
<li>More user friendly mounting as network drive on windows (albertony)</li>
|
||||
<li>Cleanup OS specific option handling and documentation (albertony)</li>
|
||||
<li>Detect if uid or gid are set in same option string: -o uid=123,gid=456 (albertony)</li>
|
||||
<li>Don't attempt to unmount if fs has been destroyed already (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
@@ -19977,32 +20078,34 @@ $ tree /tmp/b
|
||||
<ul>
|
||||
<li>Fix virtual entries causing deleted files to still appear (Nick Craig-Wood)</li>
|
||||
<li>Fix "file already exists" error for stale cache files (Nick Craig-Wood)</li>
|
||||
<li>Fix file leaks with --vfs-cache-mode full and --buffer-size 0 (Nick Craig-Wood)</li>
|
||||
<li>Fix file leaks with <code>--vfs-cache-mode</code> full and <code>--buffer-size 0</code> (Nick Craig-Wood)</li>
|
||||
<li>Fix invalid cache path on windows when using :backend: as remote (albertony)</li>
|
||||
</ul></li>
|
||||
<li>Local
|
||||
<ul>
|
||||
<li>Continue listing files/folders when a circular symlink is detected (Manish Gupta)</li>
|
||||
<li>New flag --local-zero-size-links to fix sync on some virtual filesystems (Riccardo Iaconelli)</li>
|
||||
<li>New flag <code>--local-zero-size-links</code> to fix sync on some virtual filesystems (Riccardo Iaconelli)</li>
|
||||
</ul></li>
|
||||
<li>Azure Blob
|
||||
<ul>
|
||||
<li>Add support for service principals (James Lim)</li>
|
||||
<li>Utilize streaming capabilities (Denis Neuling)</li>
|
||||
<li>Update SDK to v0.13.0 and fix API breakage (Nick Craig-Wood, Mitsuo Heijo)</li>
|
||||
<li>Add support for managed identities (Brad Ackerman)</li>
|
||||
<li>Add examples for access tier (Bob Pusateri)</li>
|
||||
<li>Utilize the streaming capabilities from the SDK for multipart uploads (Denis Neuling)</li>
|
||||
<li>Fix setting of mime types (Nick Craig-Wood)</li>
|
||||
<li>Fix crash when listing outside a SAS URL's root (Nick Craig-Wood)</li>
|
||||
<li>Delete archive tier blobs before update if --azureblob-archive-tier-delete (Nick Craig-Wood)</li>
|
||||
<li>Add support for managed identities (Brad Ackerman)</li>
|
||||
<li>Delete archive tier blobs before update if <code>--azureblob-archive-tier-delete</code> (Nick Craig-Wood)</li>
|
||||
<li>Fix crash on startup (Nick Craig-Wood)</li>
|
||||
<li>Add examples for access tier (Bob Pusateri)</li>
|
||||
<li>Fix memory usage by upgrading the SDK and implementing a TransferManager (Nick Craig-Wood)</li>
|
||||
<li>Fix memory usage by upgrading the SDK to v0.13.0 and implementing a TransferManager (Nick Craig-Wood)</li>
|
||||
<li>Require go1.14+ to compile due to SDK changes (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>B2
|
||||
<ul>
|
||||
<li>Make NewObject use less expensive API calls (Nick Craig-Wood)</li>
|
||||
<li>Fixed possible crash when accessing Backblaze b2 remote (lluuaapp)</li>
|
||||
<li>Make NewObject use less expensive API calls (Nick Craig-Wood)
|
||||
<ul>
|
||||
<li>This will improve <code>--files-from</code> and <code>restic serve</code> in particular</li>
|
||||
</ul></li>
|
||||
<li>Fixed crash on an empty file name (lluuaapp)</li>
|
||||
</ul></li>
|
||||
<li>Box
|
||||
<ul>
|
||||
@@ -20012,12 +20115,12 @@ $ tree /tmp/b
|
||||
<li>Chunker
|
||||
<ul>
|
||||
<li>Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)</li>
|
||||
<li>Set Features.ReadMimeType=false as Object.MimeType not supported (Nick Craig-Wood)</li>
|
||||
<li>Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)</li>
|
||||
<li>Fix case-insensitive NewObject, test metadata detection (Ivan Andreev)</li>
|
||||
</ul></li>
|
||||
<li>Drive
|
||||
<ul>
|
||||
<li>Implement "rclone backend copyid" command for copying files by ID (Nick Craig-Wood)</li>
|
||||
<li>Implement <code>rclone backend copyid</code> command for copying files by ID (Nick Craig-Wood)</li>
|
||||
<li>Added flag <code>--drive-stop-on-download-limit</code> to stop transfers when the download limit is exceeded (Anagh Kumar Baranwal)</li>
|
||||
<li>Implement CleanUp workaround for team drives (buengese)</li>
|
||||
<li>Allow shortcut resolution and creation to be retried (Nick Craig-Wood)</li>
|
||||
@@ -20027,45 +20130,44 @@ $ tree /tmp/b
|
||||
<li>Dropbox
|
||||
<ul>
|
||||
<li>Add support for viewing shared files and folders (buengese)</li>
|
||||
<li>Implement IDer (buengese)</li>
|
||||
<li>Set Features.ReadMimeType=false as Object.MimeType not supported (Nick Craig-Wood)</li>
|
||||
<li>Tidy repeated error message (Nick Craig-Wood)</li>
|
||||
<li>Enable short lived access tokens (Nick Craig-Wood)</li>
|
||||
<li>Implement IDer on Objects so <code>rclone lsf</code> etc can read the IDs (buengese)</li>
|
||||
<li>Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)</li>
|
||||
<li>Make malformed_path errors from too long files not retriable (Nick Craig-Wood)</li>
|
||||
<li>Test file name length before upload to fix upload loop (Nick Craig-Wood)</li>
|
||||
<li>Enable short lived access tokens (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Fichier
|
||||
<ul>
|
||||
<li>Set Features.ReadMimeType=true as Object.MimeType is supported (Nick Craig-Wood)</li>
|
||||
<li>Set Features ReadMimeType to true as Object.MimeType is supported (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>FTP
|
||||
<ul>
|
||||
<li>Add --ftp-disable-msld option to ignore MLSD for really old servers (Nick Craig-Wood)</li>
|
||||
<li>Make --tpslimit apply (Nick Craig-Wood)</li>
|
||||
<li>Add <code>--ftp-disable-msld</code> option to ignore MLSD for really old servers (Nick Craig-Wood)</li>
|
||||
<li>Make <code>--tpslimit apply</code> (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Google Cloud Storage
|
||||
<ul>
|
||||
<li>Storage class object header support (Laurens Janssen)</li>
|
||||
<li>Fix anonymous client to use rclone's HTTP client (Nick Craig-Wood)</li>
|
||||
<li>Fix Entry doesn't belong in directory "" (same as directory) - ignoring (Nick Craig-Wood)</li>
|
||||
<li>Fix <code>Entry doesn't belong in directory "" (same as directory) - ignoring</code> (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Googlephotos
|
||||
<ul>
|
||||
<li>New flag --gphotos-include-archived (Nicolas Rueff)</li>
|
||||
<li>New flag <code>--gphotos-include-archived</code> to show archived photos as well (Nicolas Rueff)</li>
|
||||
</ul></li>
|
||||
<li>Jottacloud
|
||||
<ul>
|
||||
<li>Don't erroniously report support for writing mime types (buengese)</li>
|
||||
<li>Add support for Telia Cloud (#4930) (Patrik Nordlén)</li>
|
||||
<li>Don't erroneously report support for writing mime types (buengese)</li>
|
||||
<li>Add support for Telia Cloud (Patrik Nordlén)</li>
|
||||
</ul></li>
|
||||
<li>Mailru
|
||||
<ul>
|
||||
<li>Accept special folders eg camera-upload (Ivan Andreev)</li>
|
||||
<li>Avoid prehashing of large local files (Ivan Andreev)</li>
|
||||
<li>Fix uploads after recent changes on server (Ivan Andreev)</li>
|
||||
<li>Fix range requests after June 2020 changes on server (Ivan Andreev)</li>
|
||||
<li>Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)</li>
|
||||
<li>Remove deprecated protocol quirks (Ivan Andreev)</li>
|
||||
<li>Accept special folders eg camera-upload (Ivan Andreev)</li>
|
||||
<li>Avoid prehashing of large local files (Ivan Andreev)</li>
|
||||
</ul></li>
|
||||
<li>Memory
|
||||
<ul>
|
||||
@@ -20073,14 +20175,14 @@ $ tree /tmp/b
|
||||
</ul></li>
|
||||
<li>Onedrive
|
||||
<ul>
|
||||
<li>Add support for china region operated by 21vianet and other regional suppliers (#4963) (NyaMisty)</li>
|
||||
<li>Add support for China region operated by 21vianet and other regional suppliers (NyaMisty)</li>
|
||||
<li>Warn on gateway timeout errors (Nick Craig-Wood)</li>
|
||||
<li>Fall back to normal copy if server-side copy unavailable (#4903) (Alex Chen)</li>
|
||||
<li>Fall back to normal copy if server-side copy unavailable (Alex Chen)</li>
|
||||
<li>Fix server-side copy completely disabled on OneDrive for Business (Cnly)</li>
|
||||
<li>(business only) workaround to replace existing file on server-side copy (#4904) (Alex Chen)</li>
|
||||
<li>(business only) workaround to replace existing file on server-side copy (Alex Chen)</li>
|
||||
<li>Enhance link creation with expiry, scope, type and password (Nick Craig-Wood)</li>
|
||||
<li>Remove % and # from the set of encoded characters (#4909) (Alex Chen)</li>
|
||||
<li>Support addressing site by server-relative URL (#4761) (kice)</li>
|
||||
<li>Remove % and # from the set of encoded characters (Alex Chen)</li>
|
||||
<li>Support addressing site by server-relative URL (kice)</li>
|
||||
</ul></li>
|
||||
<li>Opendrive
|
||||
<ul>
|
||||
@@ -20102,14 +20204,16 @@ $ tree /tmp/b
|
||||
<li>S3
|
||||
<ul>
|
||||
<li>Added <code>--s3-disable-http2</code> to disable http/2 (Anagh Kumar Baranwal)</li>
|
||||
<li>Complete SSE-C implementation (Nick Craig-Wood)</li>
|
||||
<li>Complete SSE-C implementation (Nick Craig-Wood)
|
||||
<ul>
|
||||
<li>Fix hashes on small files with AWS:KMS and SSE-C (Nick Craig-Wood)</li>
|
||||
<li>Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Add <code>--s3-no-head parameter</code> to minimise transactions on upload (Nick Craig-Wood)</li>
|
||||
<li>Update docs with a Reducing Costs section (Nick Craig-Wood)</li>
|
||||
<li>Added error handling for error code 429 indicating too many requests (Anagh Kumar Baranwal)</li>
|
||||
<li>Add requester pays option (kelv)</li>
|
||||
<li>Fix copy multipart with v2 auth failing with 'SignatureDoesNotMatch' (Louis Koo)</li>
|
||||
<li>Add --s3-no-head parameter to minimise transactions on upload (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>SFTP
|
||||
<ul>
|
||||
@@ -20119,8 +20223,8 @@ $ tree /tmp/b
|
||||
<li>Remember entered password in AskPass mode (Stephen Harris)</li>
|
||||
<li>Implement Shutdown method (Nick Craig-Wood)</li>
|
||||
<li>Implement keyboard interactive authentication (Nick Craig-Wood)</li>
|
||||
<li>Make --tpslimit apply (Nick Craig-Wood)</li>
|
||||
<li>Implement --sftp-use-fstat (Nick Craig-Wood)</li>
|
||||
<li>Make <code>--tpslimit</code> apply (Nick Craig-Wood)</li>
|
||||
<li>Implement <code>--sftp-use-fstat</code> for unusual SFTP servers (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
<li>Sugarsync
|
||||
<ul>
|
||||
@@ -20130,7 +20234,7 @@ $ tree /tmp/b
|
||||
<li>Swift
|
||||
<ul>
|
||||
<li>Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân)</li>
|
||||
<li>Ensure partially uploaded large files are uploaded unless --swift-leave-parts-on-error (Nguyễn Hữu Luân)</li>
|
||||
<li>Ensure partially uploaded large files are uploaded unless <code>--swift-leave-parts-on-error</code> (Nguyễn Hữu Luân)</li>
|
||||
</ul></li>
|
||||
<li>Tardigrade
|
||||
<ul>
|
||||
@@ -20142,7 +20246,7 @@ $ tree /tmp/b
|
||||
</ul></li>
|
||||
<li>Yandex
|
||||
<ul>
|
||||
<li>Set Features.WriteMimeType=false as Yandex ignores mime types (Nick Craig-Wood)</li>
|
||||
<li>Set Features WriteMimeType to false as Yandex ignores mime types (Nick Craig-Wood)</li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
<h2 id="v1.53.4---2021-01-20">v1.53.4 - 2021-01-20</h2>
|
||||
|
||||
341 MANUAL.md generated
@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Feb 02, 2021
% Mar 08, 2021

# Rclone syncs your files to cloud storage

@@ -3108,6 +3108,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write
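As an illustration of the warning above — a minimal sketch with hypothetical remotes `remote-a:` and `remote-b:` and made-up paths — each rclone instance can be pointed at its own cache hierarchy:

```
# Each mount gets a private VFS cache, so the two instances never share cache state
rclone mount remote-a: /mnt/a --vfs-cache-mode writes --cache-dir /var/cache/rclone-a
rclone mount remote-b: /mnt/b --vfs-cache-mode writes --cache-dir /var/cache/rclone-b
```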
@@ -8636,7 +8678,7 @@ not list `dir3`, `file3` or `.ignore`.
## Common pitfalls

The most frequent filter support issues on
the [rclone forum](https://https://forum.rclone.org/) are:
the [rclone forum](https://forum.rclone.org/) are:

* Not using paths relative to the root of the remote
* Not using `/` to match from the root of a remote
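To illustrate the second pitfall with a hypothetical remote and path: a leading `/` anchors the pattern at the root of the remote, so the rule below matches `sub/dir` at the top level only, not a `sub/dir` nested deeper in the tree:

```
rclone lsf remote: --include "/sub/dir/**"
```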
@@ -10031,6 +10073,7 @@ This takes the following parameters

- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set


See the [copy command](https://rclone.org/commands/rclone_copy/) command for more information on the above.
@@ -10043,6 +10086,7 @@ This takes the following parameters

- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set
- deleteEmptySrcDirs - delete empty src directories if set


@@ -10056,6 +10100,7 @@ This takes the following parameters

- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set


See the [sync command](https://rclone.org/commands/rclone_sync/) command for more information on the above.
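As a sketch of how these parameters might be passed over the rc interface — assuming an rc server is already running (for example via `rclone rcd`) and using the remote names from the examples above:

```
rclone rc sync/copy srcFs=drive:src dstFs=drive:dst createEmptySrcDirs=true
rclone rc sync/move srcFs=drive:src dstFs=drive:dst deleteEmptySrcDirs=true
```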
@@ -10954,7 +10999,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.1")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
@@ -11085,7 +11130,7 @@ and may be set in the config file.
|
||||
--drive-starred-only Only show files that are starred.
|
||||
--drive-stop-on-download-limit Make download limit errors be fatal
|
||||
--drive-stop-on-upload-limit Make upload limit errors be fatal
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-team-drive string ID of the Shared Drive (Team Drive)
|
||||
--drive-token string OAuth Access Token as a JSON blob.
|
||||
--drive-token-url string Token server url.
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
@@ -11366,8 +11411,13 @@ and may be set in the config file.
|
||||
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
|
||||
--yandex-token string OAuth Access Token as a JSON blob.
|
||||
--yandex-token-url string Token server url.
|
||||
--zoho-auth-url string Auth server URL.
|
||||
--zoho-client-id string OAuth Client Id
|
||||
--zoho-client-secret string OAuth Client Secret
|
||||
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
--zoho-token string OAuth Access Token as a JSON blob.
|
||||
--zoho-token-url string Token server url.
|
||||
```
|
||||
|
||||
1Fichier
|
||||
@@ -12281,6 +12331,16 @@ You'd then do a full `rclone sync` less often.

Note that `--fast-list` isn't required in the top-up sync.

#### Avoiding HEAD requests after PUT

By default rclone will HEAD every object it uploads. It does this to
check the object got uploaded correctly.

You can disable this with the [--s3-no-head](#s3-no-head) option - see
there for more details.

Setting this flag increases the chance for undetected upload failures.
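A hedged illustration of the trade-off described above, with a made-up bucket name — the copy skips the post-PUT HEAD check, saving one transaction per object at the cost of weaker verification:

```
rclone copy /data remote:bucket/data --s3-no-head
```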

### Hashes ###

For small objects which weren't uploaded as multipart uploads (objects
@@ -15706,7 +15766,7 @@ These URLs are used by Plex internally to connect to the Plex server securely.

The format for these URLs is the following:

https://ip-with-dots-replaced.server-hash.plex.direct:32400/
`https://ip-with-dots-replaced.server-hash.plex.direct:32400/`

The `ip-with-dots-replaced` part can be any IPv4 address, where the dots
have been replaced with dashes, e.g. `127.0.0.1` becomes `127-0-0-1`.
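Putting the pieces together, a server at `127.0.0.1` would be reached at a URL of roughly this shape (the hash segment here is only a placeholder):

```
https://127-0-0-1.0123456789abcdef.plex.direct:32400/
```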
@@ -16870,7 +16930,7 @@ as you would with any other remote, e.g. `rclone copy D:\docs secret:\docs`,
and rclone will encrypt and decrypt as needed on the fly.
If you access the wrapped remote `remote:path` directly you will bypass
the encryption, and anything you read will be in encrypted form, and
anything you write will be undencrypted. To avoid issues it is best to
anything you write will be unencrypted. To avoid issues it is best to
configure a dedicated path for encrypted content, and access it
exclusively through a crypt remote.

@@ -17821,6 +17881,21 @@ memory. It can be set smaller if you are tight on memory.

Impersonate this user when using a business account.

Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to lookup a members email address into the internal ID that
dropbox uses in the API.

Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.

You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.


- Config: impersonate
- Env Var: RCLONE_DROPBOX_IMPERSONATE
- Type: string
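For illustration only, a config entry using this option might look roughly like the sketch below; the remote name and user are made up, and the client_id/client_secret placeholders stand for your own app's credentials as described above:

```
[dropbox-team]
type = dropbox
client_id = YOUR_APP_KEY
client_secret = YOUR_APP_SECRET
impersonate = user@example.com
```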
@@ -18277,8 +18352,8 @@ excess files in the directory.

Rlone FTP supports implicit FTP over TLS servers (FTPS). This has to
be enabled in the FTP backend config for the remote, or with
`[--ftp-tls]{#ftp-tls}`. The default FTPS port is `990`, not `21` and
can be set with `[--ftp-port]{#ftp-port}`.
[`--ftp-tls`](#ftp-tls). The default FTPS port is `990`, not `21` and
can be set with [`--ftp-port`](#ftp-port).
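As a quick sketch, assuming a remote named `myftps:` is already configured for such a server, the two flags described above can also be supplied on the command line, explicitly selecting the FTPS port:

```
rclone lsd myftps: --ftp-tls --ftp-port 990
```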


### Standard Options
@@ -19042,7 +19117,7 @@ If your browser doesn't open automatically go to the following link: http://127.
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
Configure this as a team drive?
|
||||
Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> n
|
||||
@@ -19249,23 +19324,24 @@ Note: in case you configured a specific root folder on gdrive and rclone is unab
|
||||
`rclone -v foo@example.com lsf gdrive:backup`
|
||||
|
||||
|
||||
### Team drives ###
|
||||
### Shared drives (team drives) ###
|
||||
|
||||
If you want to configure the remote to point to a Google Team Drive
|
||||
then answer `y` to the question `Configure this as a team drive?`.
|
||||
If you want to configure the remote to point to a Google Shared Drive
|
||||
(previously known as Team Drives) then answer `y` to the question
|
||||
`Configure this as a Shared Drive (Team Drive)?`.
|
||||
|
||||
This will fetch the list of Team Drives from google and allow you to
|
||||
configure which one you want to use. You can also type in a team
|
||||
drive ID if you prefer.
|
||||
This will fetch the list of Shared Drives from google and allow you to
|
||||
configure which one you want to use. You can also type in a Shared
|
||||
Drive ID if you prefer.
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
Configure this as a team drive?
|
||||
Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Fetching team drive list...
|
||||
Fetching Shared Drive list...
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Rclone Test
|
||||
\ "xxxxxxxxxxxxxxxxxxxx"
|
||||
@@ -19273,7 +19349,7 @@ Choose a number from below, or type in your own value
|
||||
\ "yyyyyyyyyyyyyyyyyyyy"
|
||||
3 / Rclone Test 3
|
||||
\ "zzzzzzzzzzzzzzzzzzzz"
|
||||
Enter a Team Drive ID> 1
|
||||
Enter a Shared Drive ID> 1
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
@@ -19644,7 +19720,7 @@ Needed only if you want use SA instead of interactive login.
|
||||
|
||||
#### --drive-team-drive
|
||||
|
||||
ID of the Team Drive
|
||||
ID of the Shared Drive (Team Drive)
|
||||
|
||||
- Config: team_drive
|
||||
- Env Var: RCLONE_DRIVE_TEAM_DRIVE
|
||||
@@ -20107,11 +20183,11 @@ Options:
|
||||
|
||||
#### drives
|
||||
|
||||
List the shared drives available to this account
|
||||
List the Shared Drives available to this account
|
||||
|
||||
rclone backend drives remote: [options] [<arguments>+]
|
||||
|
||||
This command lists the shared drives (teamdrives) available to this
|
||||
This command lists the Shared Drives (Team Drives) available to this
|
||||
account.
|
||||
|
||||
Usage:
|
||||
@@ -25747,8 +25823,8 @@ The Go SSH library disables the use of the aes128-cbc cipher by
default, due to security concerns. This can be re-enabled on a
per-connection basis by setting the `use_insecure_cipher` setting in
the configuration file to `true`. Further details on the insecurity of
this cipher can be found [in this paper]
(http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf).
this cipher can be found
[in this paper](http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf).
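For illustration only — a sketch of what such a config entry might look like; the remote name, host and user are made up, and `use_insecure_cipher` is the setting described above:

```
[legacy-sftp]
type = sftp
host = oldbox.example.com
user = alice
use_insecure_cipher = true
```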

SFTP isn't supported under plan9 until [this
issue](https://github.com/pkg/sftp/issues/156) is fixed.
@@ -27199,6 +27275,26 @@ from filenames during upload.
|
||||
|
||||
Here are the standard options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-client-id
|
||||
|
||||
OAuth Client Id
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-client-secret
|
||||
|
||||
OAuth Client Secret
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-region
|
||||
|
||||
Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
@@ -27221,6 +27317,35 @@ Zoho region to connect to. You'll have to use the region you organization is reg
|
||||
|
||||
Here are the advanced options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ZOHO_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-auth-url
|
||||
|
||||
Auth server URL.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ZOHO_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-token-url
|
||||
|
||||
Token server url.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ZOHO_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
@@ -27752,12 +27877,49 @@ Options:
|
||||
|
||||
# Changelog

## v1.54.1 - 2021-03-08

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1)

* Bug Fixes
    * accounting: Fix --bwlimit when up or down is off (Nick Craig-Wood)
    * docs
        * Fix nesting of brackets and backticks in ftp docs (edwardxml)
        * Fix broken link in sftp page (edwardxml)
        * Fix typo in crypt.md (Romeo Kienzler)
        * Changelog: Correct link to digitalis.io (Alex JOST)
        * Replace #file-caching with #vfs-file-caching (Miron Veryanskiy)
        * Convert bogus example link to code (edwardxml)
        * Remove dead link from rc.md (edwardxml)
    * rc: Sync,copy,move: document createEmptySrcDirs parameter (Nick Craig-Wood)
    * lsjson: Fix unterminated JSON in the presence of errors (Nick Craig-Wood)
* Mount
    * Fix mount dropping on macOS by setting --daemon-timeout 10m (Nick Craig-Wood)
* VFS
    * Document simultaneous usage with the same cache shouldn't be used (Nick Craig-Wood)
* B2
    * Automatically raise upload cutoff to avoid spurious error (Nick Craig-Wood)
    * Fix failed to create file system with application key limited to a prefix (Nick Craig-Wood)
* Drive
    * Refer to Shared Drives instead of Team Drives (Nick Craig-Wood)
* Dropbox
    * Add scopes to oauth request and optionally "members.read" (Nick Craig-Wood)
* S3
    * Fix failed to create file system with folder level permissions policy (Nick Craig-Wood)
    * Fix Wasabi HEAD requests returning stale data by using only 1 transport (Nick Craig-Wood)
    * Fix shared_credentials_file auth (Dmitry Chepurovskiy)
    * Add --s3-no-head to reducing costs docs (Nick Craig-Wood)
* Union
    * Fix mkdir at root with remote:/ (Nick Craig-Wood)
* Zoho
    * Fix custom client id's (buengese)

## v1.54.0 - 2021-02-02

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.54.0)

* New backends
    * Compression remote (experimental)(buengese)
    * Compression remote (experimental) (buengese)
    * Enterprise File Fabric (Nick Craig-Wood)
        * This work was sponsored by [Storage Made Easy](https://storagemadeeasy.com/)
    * HDFS (Hadoop Distributed File System) (Yury Stankevich)
@@ -27765,41 +27927,35 @@ Options:
|
||||
* New Features
|
||||
* Deglobalise the config (Nick Craig-Wood)
|
||||
* Global config now read from the context
|
||||
* Global config can be passed into the rc
|
||||
* This work was sponsored by [Digitalis](digitalis.io)
|
||||
* This will enable passing of global config via the rc
|
||||
* This work was sponsored by [Digitalis](https://digitalis.io/)
|
||||
* Add `--bwlimit` for upload and download (Nick Craig-Wood)
|
||||
* Obey bwlimit in http Transport for better limiting
|
||||
* Enhance systemd integration (Hekmon)
|
||||
* log level identification
|
||||
* manual activation with flag
|
||||
* automatic systemd launch detection
|
||||
* log level identification, manual activation with flag, automatic systemd launch detection
|
||||
* Don't compile systemd log integration for non unix systems (Benjamin Gustin)
|
||||
* Add a download flag to hashsum and related commands to force rclone to download and hash files locally (lostheli)
|
||||
* Add a `--download` flag to md5sum/sha1sum/hashsum to force rclone to download and hash files locally (lostheli)
|
||||
* Add `--progress-terminal-title` to print ETA to terminal title (LaSombra)
|
||||
* Make backend env vars show in help as the defaults for backend flags (Nick Craig-Wood)
|
||||
* build
|
||||
* Raise minimum go version to go1.12 (Nick Craig-Wood)
|
||||
* check
|
||||
* Make the error count match up in the log message (Nick Craig-Wood)
|
||||
* cmd
|
||||
* Add --progress-terminal-title to print ETA to terminal title (LaSombra)
|
||||
* Make backend env vars show in help as the defaults for backend flags (Nick Craig-Wood)
|
||||
* dedupe
|
||||
* Add --by-hash to dedupe on hash not file name (Nick Craig-Wood)
|
||||
* Add --dedupe-mode list to just list dupes, changing nothing (Nick Craig-Wood)
|
||||
* Add `--by-hash` to dedupe on content hash not file name (Nick Craig-Wood)
|
||||
* Add `--dedupe-mode list` to just list dupes, changing nothing (Nick Craig-Wood)
|
||||
* Add warning if used on a remote which can't have duplicate names (Nick Craig-Wood)
|
||||
* flags: Improve error message when reading environment vars (Nick Craig-Wood)
|
||||
* fs
|
||||
* Add Shutdown optional method for backends (Nick Craig-Wood)
|
||||
* When using --files-from check files concurrently (zhucan)
|
||||
* Accumulate stats when using --dry-run (Ingo Weiss)
|
||||
* Always show stats when using --dry-run or --interactive (Nick Craig-Wood)
|
||||
* Add support for flag --no-console on windows to hide the console window (albertony)
|
||||
* When using `--files-from` check files concurrently (zhucan)
|
||||
* Accumulate stats when using `--dry-run` (Ingo Weiss)
|
||||
* Always show stats when using `--dry-run` or `--interactive` (Nick Craig-Wood)
|
||||
* Add support for flag `--no-console` on windows to hide the console window (albertony)
|
||||
* genautocomplete: Add support to output to stdout (Ingo)
|
||||
* ncdu
|
||||
* Highlight read errors instead of aborting (Claudio Bantaloukas)
|
||||
* Add sort by average size in directory (Adam Plánský)
|
||||
* Add toggle option for average size in directory - key 'a' (Adam Plánský)
|
||||
* Add toggle option for average s3ize in directory - key 'a' (Adam Plánský)
|
||||
* Add empty folder flag into ncdu browser (Adam Plánský)
|
||||
* Add ! (errror) and . (unreadable) file flags to go with e (empty) (Nick Craig-Wood)
|
||||
* Add `!` (errror) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood)
|
||||
* obscure: Make `rclone osbcure -` ignore newline at end of line (Nick Craig-Wood)
|
||||
* operations
|
||||
* Add logs when need to upload files to set mod times (Nick Craig-Wood)
|
||||
@@ -27817,26 +27973,22 @@ Options:
|
||||
* Prompt user for updating webui if an update is available (Chaitanya Bankanhal)
|
||||
* Fix plugins initialization (negative0)
|
||||
* Bug Fixes
|
||||
* build
|
||||
* Explicitly set ARM version to fix build (Nick Craig-Wood)
|
||||
* Don't explicitly set ARM version to fix ARMv5 build (Nick Craig-Wood)
|
||||
* Fix nfpm install (Nick Craig-Wood)
|
||||
* Fix docker build by upgrading ilteoood/docker_buildx (Nick Craig-Wood)
|
||||
* Temporary fix for Windows build errors (Ivan Andreev)
|
||||
* fs
|
||||
* Fix nil pointer on copy & move operations directly to remote (Anagh Kumar Baranwal)
|
||||
* Fix parsing of .. when joining remotes (Nick Craig-Wood)
|
||||
* log: Fix enabling systemd logging when using --log-file (Nick Craig-Wood)
|
||||
* move: Fix data loss when moving the same object (Nick Craig-Wood)
|
||||
* log: Fix enabling systemd logging when using `--log-file` (Nick Craig-Wood)
|
||||
* check
|
||||
* Make the error count match up in the log message (Nick Craig-Wood)
|
||||
* move: Fix data loss when source and destination are the same object (Nick Craig-Wood)
|
||||
* operations
|
||||
* Fix --cutof-mode hard not cutting off immediately (Nick Craig-Wood)
|
||||
* Fix --immutable error message (Nick Craig-Wood)
|
||||
* Fix `--cutof-mode` hard not cutting off immediately (Nick Craig-Wood)
|
||||
* Fix `--immutable` error message (Nick Craig-Wood)
|
||||
* sync
|
||||
* Fix --cutoff-mode soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)
|
||||
* Fix --immutable errors retrying many times (Nick Craig-Wood)
|
||||
* Fix `--cutoff-mode` soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)
|
||||
* Fix `--immutable` errors retrying many times (Nick Craig-Wood)
|
||||
* Docs
|
||||
* Many fixes and a rewrite of the filtering docs (edwardxml)
|
||||
* Many spelling and grammar problems (Josh Soref)
|
||||
* Many spelling and grammar fixes (Josh Soref)
|
||||
* Doc fixes for commands delete, purge, rmdir, rmdirs and mount (albertony)
|
||||
* And thanks to these people for many doc fixes too numerous to list
|
||||
* Ameer Dawood, Antoine GIRARD, Bob Bagwill, Christopher Stewart
|
||||
@@ -27846,46 +27998,44 @@ Options:
|
||||
* Mount
|
||||
* Update systemd status with cache stats (Hekmon)
|
||||
* Disable bazil/fuse based mount on macOS (Nick Craig-Wood)
|
||||
* Make mount be cmount under macOS (Nick Craig-Wood)
|
||||
* Make `rclone mount` actually run `rclone cmount` under macOS (Nick Craig-Wood)
|
||||
* Implement mknod to make NFS file creation work (Nick Craig-Wood)
|
||||
* Make sure we don't call umount more than once (Nick Craig-Wood)
|
||||
* Don't call host.Umount if a signal has been received (Nick Craig-Wood)
|
||||
* More user friendly mounting as network drive on windows (albertony)
|
||||
* Cleanup OS specific option handling and documentation (albertony)
|
||||
* Detect if uid or gid are set in same option string: -o uid=123,gid=456 (albertony)
|
||||
* Don't attempt to unmount if fs has been destroyed already (Nick Craig-Wood)
|
||||
* VFS
|
||||
* Fix virtual entries causing deleted files to still appear (Nick Craig-Wood)
|
||||
* Fix "file already exists" error for stale cache files (Nick Craig-Wood)
|
||||
* Fix file leaks with --vfs-cache-mode full and --buffer-size 0 (Nick Craig-Wood)
|
||||
* Fix file leaks with `--vfs-cache-mode` full and `--buffer-size 0` (Nick Craig-Wood)
|
||||
* Fix invalid cache path on windows when using :backend: as remote (albertony)
|
||||
* Local
|
||||
* Continue listing files/folders when a circular symlink is detected (Manish Gupta)
|
||||
* New flag --local-zero-size-links to fix sync on some virtual filesystems (Riccardo Iaconelli)
|
||||
* New flag `--local-zero-size-links` to fix sync on some virtual filesystems (Riccardo Iaconelli)
|
||||
* Azure Blob
|
||||
* Add support for service principals (James Lim)
|
||||
* Utilize streaming capabilities (Denis Neuling)
|
||||
* Update SDK to v0.13.0 and fix API breakage (Nick Craig-Wood, Mitsuo Heijo)
|
||||
* Add support for managed identities (Brad Ackerman)
|
||||
* Add examples for access tier (Bob Pusateri)
|
||||
* Utilize the streaming capabilities from the SDK for multipart uploads (Denis Neuling)
|
||||
* Fix setting of mime types (Nick Craig-Wood)
|
||||
* Fix crash when listing outside a SAS URL's root (Nick Craig-Wood)
|
||||
* Delete archive tier blobs before update if --azureblob-archive-tier-delete (Nick Craig-Wood)
|
||||
* Add support for managed identities (Brad Ackerman)
|
||||
* Delete archive tier blobs before update if `--azureblob-archive-tier-delete` (Nick Craig-Wood)
|
||||
* Fix crash on startup (Nick Craig-Wood)
|
||||
* Add examples for access tier (Bob Pusateri)
|
||||
* Fix memory usage by upgrading the SDK and implementing a TransferManager (Nick Craig-Wood)
|
||||
* Fix memory usage by upgrading the SDK to v0.13.0 and implementing a TransferManager (Nick Craig-Wood)
|
||||
* Require go1.14+ to compile due to SDK changes (Nick Craig-Wood)
|
||||
* B2
|
||||
* Make NewObject use less expensive API calls (Nick Craig-Wood)
|
||||
* Fixed possible crash when accessing Backblaze b2 remote (lluuaapp)
|
||||
* This will improve `--files-from` and `restic serve` in particular
|
||||
* Fixed crash on an empty file name (lluuaapp)
|
||||
* Box
|
||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
* Fix finding directories in a case insentive way (Nick Craig-Wood)
|
||||
* Chunker
|
||||
* Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)
|
||||
* Set Features.ReadMimeType=false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Fix case-insensitive NewObject, test metadata detection (Ivan Andreev)
|
||||
* Drive
|
||||
* Implement "rclone backend copyid" command for copying files by ID (Nick Craig-Wood)
|
||||
* Implement `rclone backend copyid` command for copying files by ID (Nick Craig-Wood)
|
||||
* Added flag `--drive-stop-on-download-limit` to stop transfers when the download limit is exceeded (Anagh Kumar Baranwal)
|
||||
* Implement CleanUp workaround for team drives (buengese)
|
||||
* Allow shortcut resolution and creation to be retried (Nick Craig-Wood)
|
||||
@@ -27893,44 +28043,43 @@ Options:
|
||||
* Add xdg office icons to xdg desktop files (Pau Rodriguez-Estivill)
|
||||
* Dropbox
|
||||
* Add support for viewing shared files and folders (buengese)
|
||||
* Implement IDer (buengese)
|
||||
* Set Features.ReadMimeType=false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Tidy repeated error message (Nick Craig-Wood)
|
||||
* Enable short lived access tokens (Nick Craig-Wood)
|
||||
* Implement IDer on Objects so `rclone lsf` etc can read the IDs (buengese)
|
||||
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Make malformed_path errors from too long files not retriable (Nick Craig-Wood)
|
||||
* Test file name length before upload to fix upload loop (Nick Craig-Wood)
|
||||
* Enable short lived access tokens (Nick Craig-Wood)
|
||||
* Fichier
|
||||
* Set Features.ReadMimeType=true as Object.MimeType is supported (Nick Craig-Wood)
|
||||
* Set Features ReadMimeType to true as Object.MimeType is supported (Nick Craig-Wood)
|
||||
* FTP
|
||||
* Add --ftp-disable-msld option to ignore MLSD for really old servers (Nick Craig-Wood)
|
||||
* Make --tpslimit apply (Nick Craig-Wood)
|
||||
* Add `--ftp-disable-msld` option to ignore MLSD for really old servers (Nick Craig-Wood)
|
||||
* Make `--tpslimit apply` (Nick Craig-Wood)
|
||||
* Google Cloud Storage
|
||||
* Storage class object header support (Laurens Janssen)
|
||||
* Fix anonymous client to use rclone's HTTP client (Nick Craig-Wood)
|
||||
* Fix Entry doesn't belong in directory "" (same as directory) - ignoring (Nick Craig-Wood)
|
||||
* Fix `Entry doesn't belong in directory "" (same as directory) - ignoring` (Nick Craig-Wood)
|
||||
* Googlephotos
|
||||
* New flag --gphotos-include-archived (Nicolas Rueff)
|
||||
* New flag `--gphotos-include-archived` to show archived photos as well (Nicolas Rueff)
|
||||
* Jottacloud
|
||||
* Don't erroniously report support for writing mime types (buengese)
|
||||
* Add support for Telia Cloud (#4930) (Patrik Nordlén)
|
||||
* Don't erroneously report support for writing mime types (buengese)
|
||||
* Add support for Telia Cloud (Patrik Nordlén)
|
||||
* Mailru
|
||||
* Accept special folders eg camera-upload (Ivan Andreev)
|
||||
* Avoid prehashing of large local files (Ivan Andreev)
|
||||
* Fix uploads after recent changes on server (Ivan Andreev)
|
||||
* Fix range requests after June 2020 changes on server (Ivan Andreev)
|
||||
* Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
|
||||
* Remove deprecated protocol quirks (Ivan Andreev)
|
||||
* Accept special folders eg camera-upload (Ivan Andreev)
|
||||
* Avoid prehashing of large local files (Ivan Andreev)
|
||||
* Memory
|
||||
* Fix setting of mime types (Nick Craig-Wood)
|
||||
* Onedrive
|
||||
* Add support for china region operated by 21vianet and other regional suppliers (#4963) (NyaMisty)
|
||||
* Add support for China region operated by 21vianet and other regional suppliers (NyaMisty)
|
||||
* Warn on gateway timeout errors (Nick Craig-Wood)
|
||||
* Fall back to normal copy if server-side copy unavailable (#4903) (Alex Chen)
|
||||
* Fall back to normal copy if server-side copy unavailable (Alex Chen)
|
||||
* Fix server-side copy completely disabled on OneDrive for Business (Cnly)
|
||||
* (business only) workaround to replace existing file on server-side copy (#4904) (Alex Chen)
|
||||
* (business only) workaround to replace existing file on server-side copy (Alex Chen)
|
||||
* Enhance link creation with expiry, scope, type and password (Nick Craig-Wood)
|
||||
* Remove % and # from the set of encoded characters (#4909) (Alex Chen)
|
||||
* Support addressing site by server-relative URL (#4761) (kice)
|
||||
* Remove % and # from the set of encoded characters (Alex Chen)
|
||||
* Support addressing site by server-relative URL (kice)
|
||||
* Opendrive
|
||||
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||
* Pcloud
|
||||
@@ -27943,13 +28092,13 @@ Options:
|
||||
* S3
|
||||
* Added `--s3-disable-http2` to disable http/2 (Anagh Kumar Baranwal)
|
||||
* Complete SSE-C implementation (Nick Craig-Wood)
|
||||
* Fix hashes on small files with AWS:KMS and SSE-C (Nick Craig-Wood)
|
||||
* Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C (Nick Craig-Wood)
|
||||
* Fix hashes on small files with AWS:KMS and SSE-C (Nick Craig-Wood)
|
||||
* Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C (Nick Craig-Wood)
|
||||
* Add `--s3-no-head parameter` to minimise transactions on upload (Nick Craig-Wood)
|
||||
* Update docs with a Reducing Costs section (Nick Craig-Wood)
|
||||
* Added error handling for error code 429 indicating too many requests (Anagh Kumar Baranwal)
|
||||
* Add requester pays option (kelv)
|
||||
* Fix copy multipart with v2 auth failing with 'SignatureDoesNotMatch' (Louis Koo)
|
||||
* Add --s3-no-head parameter to minimise transactions on upload (Nick Craig-Wood)
|
||||
* SFTP
|
||||
* Allow cert based auth via optional pubkey (Stephen Harris)
|
||||
* Allow user to optionally check server hosts key to add security (Stephen Harris)
|
||||
@@ -27957,20 +28106,20 @@ Options:
|
||||
* Remember entered password in AskPass mode (Stephen Harris)
|
||||
* Implement Shutdown method (Nick Craig-Wood)
|
||||
* Implement keyboard interactive authentication (Nick Craig-Wood)
|
||||
* Make --tpslimit apply (Nick Craig-Wood)
|
||||
* Implement --sftp-use-fstat (Nick Craig-Wood)
|
||||
* Make `--tpslimit` apply (Nick Craig-Wood)
|
||||
* Implement `--sftp-use-fstat` for unusual SFTP servers (Nick Craig-Wood)
|
||||
* Sugarsync
|
||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
* Fix finding directories in a case insentive way (Nick Craig-Wood)
|
||||
* Swift
|
||||
* Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân)
|
||||
* Ensure partially uploaded large files are uploaded unless --swift-leave-parts-on-error (Nguyễn Hữu Luân)
|
||||
* Ensure partially uploaded large files are uploaded unless `--swift-leave-parts-on-error` (Nguyễn Hữu Luân)
|
||||
* Tardigrade
|
||||
* Upgrade to uplink v1.4.1 (Caleb Case)
|
||||
* WebDAV
|
||||
* Updated docs to show streaming to nextcloud is working (Durval Menezes)
|
||||
* Yandex
|
||||
* Set Features.WriteMimeType=false as Yandex ignores mime types (Nick Craig-Wood)
|
||||
* Set Features WriteMimeType to false as Yandex ignores mime types (Nick Craig-Wood)
|
||||
|
||||
## v1.53.4 - 2021-01-20
|
||||
|
||||
|
||||
358 MANUAL.txt generated
@@ -1,6 +1,6 @@
|
||||
rclone(1) User Manual
|
||||
Nick Craig-Wood
|
||||
Feb 02, 2021
|
||||
Mar 08, 2021
|
||||
|
||||
|
||||
|
||||
@@ -2962,8 +2962,8 @@ Limitations
|
||||
Without the use of --vfs-cache-mode this can only write files
|
||||
sequentially, it can only seek when reading. This means that many
|
||||
applications won't work with their files on an rclone mount without
|
||||
--vfs-cache-mode writes or --vfs-cache-mode full. See the File Caching
|
||||
section for more info.
|
||||
--vfs-cache-mode writes or --vfs-cache-mode full. See the VFS File
|
||||
Caching section for more info.
|
||||
|
||||
The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2,
|
||||
Hubic) do not support the concept of empty directories, so empty
|
||||
@@ -2979,7 +2979,7 @@ File systems expect things to be 100% reliable, whereas cloud storage
|
||||
systems are a long way from 100% reliable. The rclone sync/copy commands
|
||||
cope with this with lots of retries. However rclone mount can't use
|
||||
retries in the same way without making local copies of the uploads. Look
|
||||
at the file caching for solutions to make mount more reliable.
|
||||
at the VFS File Caching for solutions to make mount more reliable.
|
||||
|
||||
|
||||
Attribute caching
|
||||
@@ -3150,6 +3150,12 @@ for two reasons. Firstly because it is only checked every
|
||||
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
|
||||
from the cache.
|
||||
|
||||
You SHOULD NOT run two copies of rclone using the same VFS cache with
|
||||
the same or overlapping remotes if using --vfs-cache-mode > off. This
|
||||
can potentially cause data corruption if you do. You can work around
|
||||
this by giving each rclone its own cache hierarchy with --cache-dir. You
|
||||
don't need to worry about this if the remotes in use don't overlap.
|
||||
|
||||
--vfs-cache-mode off
|
||||
|
||||
In this mode (the default) the cache will read directly from the remote
|
||||
@@ -10156,6 +10192,8 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if
|
||||
set
|
||||
|
||||
See the copy command command for more information on the above.
|
||||
|
||||
@@ -10167,6 +10205,8 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if
|
||||
set
|
||||
- deleteEmptySrcDirs - delete empty src directories if set
|
||||
|
||||
See the move command command for more information on the above.
|
||||
@@ -10179,6 +10219,8 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if
|
||||
set
|
||||
|
||||
See the sync command command for more information on the above.
|
||||
|
||||
@@ -11048,7 +11090,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.1")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
|
||||
|
||||
@@ -11178,7 +11220,7 @@ and may be set in the config file.
|
||||
--drive-starred-only Only show files that are starred.
|
||||
--drive-stop-on-download-limit Make download limit errors be fatal
|
||||
--drive-stop-on-upload-limit Make upload limit errors be fatal
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-team-drive string ID of the Shared Drive (Team Drive)
|
||||
--drive-token string OAuth Access Token as a JSON blob.
|
||||
--drive-token-url string Token server url.
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
@@ -11459,8 +11501,13 @@ and may be set in the config file.
|
||||
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
|
||||
--yandex-token string OAuth Access Token as a JSON blob.
|
||||
--yandex-token-url string Token server url.
|
||||
--zoho-auth-url string Auth server URL.
|
||||
--zoho-client-id string OAuth Client Id
|
||||
--zoho-client-secret string OAuth Client Secret
|
||||
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
--zoho-token string OAuth Access Token as a JSON blob.
|
||||
--zoho-token-url string Token server url.
|
||||
|
||||
|
||||
1Fichier
|
||||
@@ -12350,6 +12397,16 @@ You'd then do a full rclone sync less often.
|
||||
|
||||
Note that --fast-list isn't required in the top-up sync.
|
||||
|
||||
Avoiding HEAD requests after PUT
|
||||
|
||||
By default rclone will HEAD every object it uploads. It does this to
|
||||
check the object got uploaded correctly.
|
||||
|
||||
You can disable this with the --s3-no-head option - see there for more
|
||||
details.
|
||||
|
||||
Setting this flag increases the chance for undetected upload failures.
|
||||
|
||||
Hashes
|
||||
|
||||
For small objects which weren't uploaded as multipart uploads (objects
|
||||
@@ -16882,9 +16939,8 @@ would with any other remote, e.g. rclone copy D:\docs secret:\docs, and
|
||||
rclone will encrypt and decrypt as needed on the fly. If you access the
|
||||
wrapped remote remote:path directly you will bypass the encryption, and
|
||||
anything you read will be in encrypted form, and anything you write will
|
||||
be undencrypted. To avoid issues it is best to configure a dedicated
|
||||
path for encrypted content, and access it exclusively through a crypt
|
||||
remote.
|
||||
be unencrypted. To avoid issues it is best to configure a dedicated path
|
||||
for encrypted content, and access it exclusively through a crypt remote.
|
||||
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
@@ -17814,6 +17870,20 @@ can be set smaller if you are tight on memory.
|
||||
|
||||
Impersonate this user when using a business account.
|
||||
|
||||
Note that if you want to use impersonate, you should make sure this flag
|
||||
is set when running "rclone config" as this will cause rclone to request
|
||||
the "members.read" scope which it won't normally. This is needed to
|
||||
lookup a members email address into the internal ID that dropbox uses in
|
||||
the API.
|
||||
|
||||
Using the "members.read" scope will require a Dropbox Team Admin to
|
||||
approve during the OAuth flow.
|
||||
|
||||
You will have to use your own App (setting your own client_id and
|
||||
client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once v1.55
|
||||
or later is in use everywhere.
|
||||
|
||||
- Config: impersonate
|
||||
- Env Var: RCLONE_DROPBOX_IMPERSONATE
|
||||
- Type: string
|
||||
@@ -18257,9 +18327,8 @@ Example without a config file
|
||||
Implicit TLS
|
||||
|
||||
Rlone FTP supports implicit FTP over TLS servers (FTPS). This has to be
|
||||
enabled in the FTP backend config for the remote, or with
|
||||
[--ftp-tls]{#ftp-tls}. The default FTPS port is 990, not 21 and can be
|
||||
set with [--ftp-port]{#ftp-port}.
|
||||
enabled in the FTP backend config for the remote, or with --ftp-tls. The
|
||||
default FTPS port is 990, not 21 and can be set with --ftp-port.
|
||||
|
||||
Standard Options
|
||||
|
||||
@@ -19009,7 +19078,7 @@ This will guide you through an interactive setup process:
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
Configure this as a team drive?
|
||||
Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> n
|
||||
@@ -19212,22 +19281,23 @@ you created/selected at step #1 - use rclone without specifying the
|
||||
--drive-impersonate option, like this:
|
||||
rclone -v foo@example.com lsf gdrive:backup
|
||||
|
||||
Team drives
|
||||
Shared drives (team drives)
|
||||
|
||||
If you want to configure the remote to point to a Google Team Drive then
|
||||
answer y to the question Configure this as a team drive?.
|
||||
If you want to configure the remote to point to a Google Shared Drive
|
||||
(previously known as Team Drives) then answer y to the question
|
||||
Configure this as a Shared Drive (Team Drive)?.
|
||||
|
||||
This will fetch the list of Team Drives from google and allow you to
|
||||
configure which one you want to use. You can also type in a team drive
|
||||
This will fetch the list of Shared Drives from google and allow you to
|
||||
configure which one you want to use. You can also type in a Shared Drive
|
||||
ID if you prefer.
|
||||
|
||||
For example:
|
||||
|
||||
Configure this as a team drive?
|
||||
Configure this as a Shared Drive (Team Drive)?
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Fetching team drive list...
|
||||
Fetching Shared Drive list...
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Rclone Test
|
||||
\ "xxxxxxxxxxxxxxxxxxxx"
|
||||
@@ -19235,7 +19305,7 @@ For example:
|
||||
\ "yyyyyyyyyyyyyyyyyyyy"
|
||||
3 / Rclone Test 3
|
||||
\ "zzzzzzzzzzzzzzzzzzzz"
|
||||
Enter a Team Drive ID> 1
|
||||
Enter a Shared Drive ID> 1
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
@@ -19635,7 +19705,7 @@ if you want use SA instead of interactive login.
|
||||
|
||||
--drive-team-drive
|
||||
|
||||
ID of the Team Drive
|
||||
ID of the Shared Drive (Team Drive)
|
||||
|
||||
- Config: team_drive
|
||||
- Env Var: RCLONE_DRIVE_TEAM_DRIVE
|
||||
@@ -20093,11 +20163,11 @@ Options:
|
||||
|
||||
drives
|
||||
|
||||
List the shared drives available to this account
|
||||
List the Shared Drives available to this account
|
||||
|
||||
rclone backend drives remote: [options] [<arguments>+]
|
||||
|
||||
This command lists the shared drives (teamdrives) available to this
|
||||
This command lists the Shared Drives (Team Drives) available to this
|
||||
account.
|
||||
|
||||
Usage:
|
||||
@@ -25693,7 +25763,7 @@ The Go SSH library disables the use of the aes128-cbc cipher by default,
|
||||
due to security concerns. This can be re-enabled on a per-connection
|
||||
basis by setting the use_insecure_cipher setting in the configuration
|
||||
file to true. Further details on the insecurity of this cipher can be
|
||||
found [in this paper] (http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf).
|
||||
found in this paper.
|
||||
|
||||
SFTP isn't supported under plan9 until this issue is fixed.
|
||||
|
||||
@@ -27183,6 +27253,24 @@ Standard Options
|
||||
|
||||
Here are the standard options specific to zoho (Zoho).
|
||||
|
||||
--zoho-client-id
|
||||
|
||||
OAuth Client Id Leave blank normally.
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
--zoho-client-secret
|
||||
|
||||
OAuth Client Secret Leave blank normally.
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
--zoho-region
|
||||
|
||||
Zoho region to connect to. You'll have to use the region you
|
||||
@@ -27206,6 +27294,33 @@ Advanced Options
|
||||
|
||||
Here are the advanced options specific to zoho (Zoho).
|
||||
|
||||
--zoho-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ZOHO_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
--zoho-auth-url
|
||||
|
||||
Auth server URL. Leave blank to use the provider defaults.
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ZOHO_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
--zoho-token-url
|
||||
|
||||
Token server url. Leave blank to use the provider defaults.
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ZOHO_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
--zoho-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
@@ -27707,12 +27822,62 @@ Options:
|
||||
CHANGELOG
|
||||
|
||||
|
||||
v1.54.1 - 2021-03-08
|
||||
|
||||
See commits
|
||||
|
||||
- Bug Fixes
|
||||
- accounting: Fix --bwlimit when up or down is off (Nick
|
||||
Craig-Wood)
|
||||
- docs
|
||||
- Fix nesting of brackets and backticks in ftp docs
|
||||
(edwardxml)
|
||||
- Fix broken link in sftp page (edwardxml)
|
||||
- Fix typo in crypt.md (Romeo Kienzler)
|
||||
- Changelog: Correct link to digitalis.io (Alex JOST)
|
||||
- Replace #file-caching with #vfs-file-caching (Miron
|
||||
Veryanskiy)
|
||||
- Convert bogus example link to code (edwardxml)
|
||||
- Remove dead link from rc.md (edwardxml)
|
||||
- rc: Sync,copy,move: document createEmptySrcDirs parameter (Nick
|
||||
Craig-Wood)
|
||||
- lsjson: Fix unterminated JSON in the presence of errors (Nick
|
||||
Craig-Wood)
|
||||
- Mount
|
||||
- Fix mount dropping on macOS by setting --daemon-timeout 10m
|
||||
(Nick Craig-Wood)
|
||||
- VFS
|
||||
- Document simultaneous usage with the same cache shouldn't be
|
||||
used (Nick Craig-Wood)
|
||||
- B2
|
||||
- Automatically raise upload cutoff to avoid spurious error (Nick
|
||||
Craig-Wood)
|
||||
- Fix failed to create file system with application key limited to
|
||||
a prefix (Nick Craig-Wood)
|
||||
- Drive
|
||||
- Refer to Shared Drives instead of Team Drives (Nick Craig-Wood)
|
||||
- Dropbox
|
||||
- Add scopes to oauth request and optionally "members.read" (Nick
|
||||
Craig-Wood)
|
||||
- S3
|
||||
- Fix failed to create file system with folder level permissions
|
||||
policy (Nick Craig-Wood)
|
||||
- Fix Wasabi HEAD requests returning stale data by using only 1
|
||||
transport (Nick Craig-Wood)
|
||||
- Fix shared_credentials_file auth (Dmitry Chepurovskiy)
|
||||
- Add --s3-no-head to reducing costs docs (Nick Craig-Wood)
|
||||
- Union
|
||||
- Fix mkdir at root with remote:/ (Nick Craig-Wood)
|
||||
- Zoho
|
||||
- Fix custom client id's (buengese)
|
||||
|
||||
|
||||
v1.54.0 - 2021-02-02
|
||||
|
||||
See commits
|
||||
|
||||
- New backends
|
||||
- Compression remote (experimental)(buengese)
|
||||
- Compression remote (experimental) (buengese)
|
||||
- Enterprise File Fabric (Nick Craig-Wood)
|
||||
- This work was sponsored by Storage Made Easy
|
||||
- HDFS (Hadoop Distributed File System) (Yury Stankevich)
|
||||
@@ -27720,37 +27885,30 @@ See commits
|
||||
- New Features
|
||||
- Deglobalise the config (Nick Craig-Wood)
|
||||
- Global config now read from the context
|
||||
- Global config can be passed into the rc
|
||||
- This will enable passing of global config via the rc
|
||||
- This work was sponsored by Digitalis
|
||||
- Add --bwlimit for upload and download (Nick Craig-Wood)
|
||||
- Obey bwlimit in http Transport for better limiting
|
||||
- Enhance systemd integration (Hekmon)
|
||||
- log level identification
|
||||
- manual activation with flag
|
||||
- automatic systemd launch detection
|
||||
- log level identification, manual activation with flag,
|
||||
automatic systemd launch detection
|
||||
- Don't compile systemd log integration for non unix systems
|
||||
(Benjamin Gustin)
|
||||
- Add a download flag to hashsum and related commands to force
|
||||
rclone to download and hash files locally (lostheli)
|
||||
- Add a --download flag to md5sum/sha1sum/hashsum to force rclone
|
||||
to download and hash files locally (lostheli)
|
||||
- Add --progress-terminal-title to print ETA to terminal title
|
||||
(LaSombra)
|
||||
- Make backend env vars show in help as the defaults for backend
|
||||
flags (Nick Craig-Wood)
|
||||
- build
|
||||
- Raise minimum go version to go1.12 (Nick Craig-Wood)
|
||||
- check
|
||||
- Make the error count match up in the log message (Nick
|
||||
Craig-Wood)
|
||||
- cmd
|
||||
- Add --progress-terminal-title to print ETA to terminal title
|
||||
(LaSombra)
|
||||
- Make backend env vars show in help as the defaults for
|
||||
backend flags (Nick Craig-Wood)
|
||||
- dedupe
|
||||
- Add --by-hash to dedupe on hash not file name (Nick
|
||||
- Add --by-hash to dedupe on content hash not file name (Nick
|
||||
Craig-Wood)
|
||||
- Add --dedupe-mode list to just list dupes, changing nothing
|
||||
(Nick Craig-Wood)
|
||||
- Add warning if used on a remote which can't have duplicate
|
||||
names (Nick Craig-Wood)
|
||||
- flags: Improve error message when reading environment vars (Nick
|
||||
Craig-Wood)
|
||||
- fs
|
||||
- Add Shutdown optional method for backends (Nick Craig-Wood)
|
||||
- When using --files-from check files concurrently (zhucan)
|
||||
@@ -27764,7 +27922,7 @@ See commits
|
||||
- Highlight read errors instead of aborting (Claudio
|
||||
Bantaloukas)
|
||||
- Add sort by average size in directory (Adam Plánský)
|
||||
- Add toggle option for average size in directory - key 'a'
|
||||
- Add toggle option for average s3ize in directory - key 'a'
|
||||
(Adam Plánský)
|
||||
- Add empty folder flag into ncdu browser (Adam Plánský)
|
||||
- Add ! (error) and . (unreadable) file flags to go with e
|
||||
@@ -27794,22 +27952,17 @@ See commits
|
||||
(Chaitanya Bankanhal)
|
||||
- Fix plugins initialization (negative0)
|
||||
- Bug Fixes
|
||||
- build
|
||||
- Explicitly set ARM version to fix build (Nick Craig-Wood)
|
||||
- Don't explicitly set ARM version to fix ARMv5 build (Nick
|
||||
Craig-Wood)
|
||||
- Fix nfpm install (Nick Craig-Wood)
|
||||
- Fix docker build by upgrading ilteoood/docker_buildx (Nick
|
||||
Craig-Wood)
|
||||
- Temporary fix for Windows build errors (Ivan Andreev)
|
||||
- fs
|
||||
- Fix nil pointer on copy & move operations directly to remote
|
||||
(Anagh Kumar Baranwal)
|
||||
- Fix parsing of .. when joining remotes (Nick Craig-Wood)
|
||||
- log: Fix enabling systemd logging when using --log-file (Nick
|
||||
Craig-Wood)
|
||||
- move: Fix data loss when moving the same object (Nick
|
||||
Craig-Wood)
|
||||
- check
|
||||
- Make the error count match up in the log message (Nick
|
||||
Craig-Wood)
|
||||
- move: Fix data loss when source and destination are the same
|
||||
object (Nick Craig-Wood)
|
||||
- operations
|
||||
- Fix --cutof-mode hard not cutting off immediately (Nick
|
||||
Craig-Wood)
|
||||
@@ -27820,7 +27973,7 @@ See commits
|
||||
- Fix --immutable errors retrying many times (Nick Craig-Wood)
|
||||
- Docs
|
||||
- Many fixes and a rewrite of the filtering docs (edwardxml)
|
||||
- Many spelling and grammar problems (Josh Soref)
|
||||
- Many spelling and grammar fixes (Josh Soref)
|
||||
- Doc fixes for commands delete, purge, rmdir, rmdirs and mount
|
||||
(albertony)
|
||||
- And thanks to these people for many doc fixes too numerous to
|
||||
@@ -27834,15 +27987,12 @@ See commits
|
||||
- Mount
|
||||
- Update systemd status with cache stats (Hekmon)
|
||||
- Disable bazil/fuse based mount on macOS (Nick Craig-Wood)
|
||||
- Make mount be cmount under macOS (Nick Craig-Wood)
|
||||
- Make rclone mount actually run rclone cmount under macOS
|
||||
(Nick Craig-Wood)
|
||||
- Implement mknod to make NFS file creation work (Nick Craig-Wood)
|
||||
- Make sure we don't call umount more than once (Nick Craig-Wood)
|
||||
- Don't call host.Umount if a signal has been received (Nick
|
||||
Craig-Wood)
|
||||
- More user friendly mounting as network drive on windows
|
||||
(albertony)
|
||||
- Cleanup OS specific option handling and documentation
|
||||
(albertony)
|
||||
- Detect if uid or gid are set in same option string: -o
|
||||
uid=123,gid=456 (albertony)
|
||||
- Don't attempt to unmount if fs has been destroyed already (Nick
|
||||
@@ -27863,37 +28013,37 @@ See commits
|
||||
filesystems (Riccardo Iaconelli)
|
||||
- Azure Blob
|
||||
- Add support for service principals (James Lim)
|
||||
- Utilize streaming capabilities (Denis Neuling)
|
||||
- Update SDK to v0.13.0 and fix API breakage (Nick Craig-Wood,
|
||||
Mitsuo Heijo)
|
||||
- Add support for managed identities (Brad Ackerman)
|
||||
- Add examples for access tier (Bob Pusateri)
|
||||
- Utilize the streaming capabilities from the SDK for multipart
|
||||
uploads (Denis Neuling)
|
||||
- Fix setting of mime types (Nick Craig-Wood)
|
||||
- Fix crash when listing outside a SAS URL's root (Nick
|
||||
Craig-Wood)
|
||||
- Delete archive tier blobs before update if
|
||||
--azureblob-archive-tier-delete (Nick Craig-Wood)
|
||||
- Add support for managed identities (Brad Ackerman)
|
||||
- Fix crash on startup (Nick Craig-Wood)
|
||||
- Add examples for access tier (Bob Pusateri)
|
||||
- Fix memory usage by upgrading the SDK and implementing a
|
||||
TransferManager (Nick Craig-Wood)
|
||||
- Fix memory usage by upgrading the SDK to v0.13.0 and
|
||||
implementing a TransferManager (Nick Craig-Wood)
|
||||
- Require go1.14+ to compile due to SDK changes (Nick Craig-Wood)
|
||||
- B2
|
||||
- Make NewObject use less expensive API calls (Nick Craig-Wood)
|
||||
- Fixed possible crash when accessing Backblaze b2 remote
|
||||
(lluuaapp)
|
||||
- This will improve --files-from and restic serve in
|
||||
particular
|
||||
- Fixed crash on an empty file name (lluuaapp)
|
||||
- Box
|
||||
- Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
- Fix finding directories in a case insensitive way (Nick
|
||||
Craig-Wood)
|
||||
- Chunker
|
||||
- Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)
|
||||
- Set Features.ReadMimeType=false as Object.MimeType not supported
|
||||
(Nick Craig-Wood)
|
||||
- Set Features ReadMimeType to false as Object.MimeType not
|
||||
supported (Nick Craig-Wood)
|
||||
- Fix case-insensitive NewObject, test metadata detection (Ivan
|
||||
Andreev)
|
||||
- Drive
|
||||
- Implement "rclone backend copyid" command for copying files by
|
||||
ID (Nick Craig-Wood)
|
||||
- Implement rclone backend copyid command for copying files by ID
|
||||
(Nick Craig-Wood)
|
||||
- Added flag --drive-stop-on-download-limit to stop transfers when
|
||||
the download limit is exceeded (Anagh Kumar Baranwal)
|
||||
- Implement CleanUp workaround for team drives (buengese)
|
||||
@@ -27904,18 +28054,18 @@ See commits
|
||||
Rodriguez-Estivill)
|
||||
- Dropbox
|
||||
- Add support for viewing shared files and folders (buengese)
|
||||
- Implement IDer (buengese)
|
||||
- Set Features.ReadMimeType=false as Object.MimeType not supported
|
||||
(Nick Craig-Wood)
|
||||
- Tidy repeated error message (Nick Craig-Wood)
|
||||
- Enable short lived access tokens (Nick Craig-Wood)
|
||||
- Implement IDer on Objects so rclone lsf etc can read the IDs
|
||||
(buengese)
|
||||
- Set Features ReadMimeType to false as Object.MimeType not
|
||||
supported (Nick Craig-Wood)
|
||||
- Make malformed_path errors from too long files not retriable
|
||||
(Nick Craig-Wood)
|
||||
- Test file name length before upload to fix upload loop (Nick
|
||||
Craig-Wood)
|
||||
- Enable short lived access tokens (Nick Craig-Wood)
|
||||
- Fichier
|
||||
- Set Features.ReadMimeType=true as Object.MimeType is supported
|
||||
(Nick Craig-Wood)
|
||||
- Set Features ReadMimeType to true as Object.MimeType is
|
||||
supported (Nick Craig-Wood)
|
||||
- FTP
|
||||
- Add --ftp-disable-msld option to ignore MLSD for really old
|
||||
servers (Nick Craig-Wood)
|
||||
@@ -27924,39 +28074,40 @@ See commits
|
||||
- Storage class object header support (Laurens Janssen)
|
||||
- Fix anonymous client to use rclone's HTTP client (Nick
|
||||
Craig-Wood)
|
||||
- Fix Entry doesn't belong in directory "" (same as directory) -
|
||||
ignoring (Nick Craig-Wood)
|
||||
- Fix
|
||||
Entry doesn't belong in directory "" (same as directory) - ignoring
|
||||
(Nick Craig-Wood)
|
||||
- Googlephotos
|
||||
- New flag --gphotos-include-archived (Nicolas Rueff)
|
||||
- New flag --gphotos-include-archived to show archived photos as
|
||||
well (Nicolas Rueff)
|
||||
- Jottacloud
|
||||
- Don't erroniously report support for writing mime types
|
||||
- Don't erroneously report support for writing mime types
|
||||
(buengese)
|
||||
- Add support for Telia Cloud (#4930) (Patrik Nordlén)
|
||||
- Add support for Telia Cloud (Patrik Nordlén)
|
||||
- Mailru
|
||||
- Accept special folders eg camera-upload (Ivan Andreev)
|
||||
- Avoid prehashing of large local files (Ivan Andreev)
|
||||
- Fix uploads after recent changes on server (Ivan Andreev)
|
||||
- Fix range requests after June 2020 changes on server (Ivan
|
||||
Andreev)
|
||||
- Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
|
||||
- Remove deprecated protocol quirks (Ivan Andreev)
|
||||
- Accept special folders eg camera-upload (Ivan Andreev)
|
||||
- Avoid prehashing of large local files (Ivan Andreev)
|
||||
- Memory
|
||||
- Fix setting of mime types (Nick Craig-Wood)
|
||||
- Onedrive
|
||||
- Add support for china region operated by 21vianet and other
|
||||
regional suppliers (#4963) (NyaMisty)
|
||||
- Add support for China region operated by 21vianet and other
|
||||
regional suppliers (NyaMisty)
|
||||
- Warn on gateway timeout errors (Nick Craig-Wood)
|
||||
- Fall back to normal copy if server-side copy unavailable (#4903)
|
||||
(Alex Chen)
|
||||
- Fall back to normal copy if server-side copy unavailable (Alex
|
||||
Chen)
|
||||
- Fix server-side copy completely disabled on OneDrive for
|
||||
Business (Cnly)
|
||||
- (business only) workaround to replace existing file on
|
||||
server-side copy (#4904) (Alex Chen)
|
||||
server-side copy (Alex Chen)
|
||||
- Enhance link creation with expiry, scope, type and password
|
||||
(Nick Craig-Wood)
|
||||
- Remove % and # from the set of encoded characters (#4909) (Alex
|
||||
Chen)
|
||||
- Support addressing site by server-relative URL (#4761) (kice)
|
||||
- Remove % and # from the set of encoded characters (Alex Chen)
|
||||
- Support addressing site by server-relative URL (kice)
|
||||
- Opendrive
|
||||
- Fix finding directories in a case insensitive way (Nick
|
||||
Craig-Wood)
|
||||
@@ -27972,18 +28123,18 @@ See commits
|
||||
- Added --s3-disable-http2 to disable http/2 (Anagh Kumar
|
||||
Baranwal)
|
||||
- Complete SSE-C implementation (Nick Craig-Wood)
|
||||
- Fix hashes on small files with AWS:KMS and SSE-C (Nick
|
||||
Craig-Wood)
|
||||
- Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C (Nick
|
||||
Craig-Wood)
|
||||
- Fix hashes on small files with AWS:KMS and SSE-C (Nick
|
||||
Craig-Wood)
|
||||
- Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C
|
||||
(Nick Craig-Wood)
|
||||
- Add --s3-no-head parameter to minimise transactions on upload
|
||||
(Nick Craig-Wood)
|
||||
- Update docs with a Reducing Costs section (Nick Craig-Wood)
|
||||
- Added error handling for error code 429 indicating too many
|
||||
requests (Anagh Kumar Baranwal)
|
||||
- Add requester pays option (kelv)
|
||||
- Fix copy multipart with v2 auth failing with
|
||||
'SignatureDoesNotMatch' (Louis Koo)
|
||||
- Add --s3-no-head parameter to minimise transactions on upload
|
||||
(Nick Craig-Wood)
|
||||
- SFTP
|
||||
- Allow cert based auth via optional pubkey (Stephen Harris)
|
||||
- Allow user to optionally check server hosts key to add security
|
||||
@@ -27994,7 +28145,8 @@ See commits
|
||||
- Implement Shutdown method (Nick Craig-Wood)
|
||||
- Implement keyboard interactive authentication (Nick Craig-Wood)
|
||||
- Make --tpslimit apply (Nick Craig-Wood)
|
||||
- Implement --sftp-use-fstat (Nick Craig-Wood)
|
||||
- Implement --sftp-use-fstat for unusual SFTP servers (Nick
|
||||
Craig-Wood)
|
||||
- Sugarsync
|
||||
- Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
- Fix finding directories in a case insensitive way (Nick
|
||||
@@ -28010,7 +28162,7 @@ See commits
|
||||
- Updated docs to show streaming to nextcloud is working (Durval
|
||||
Menezes)
|
||||
- Yandex
|
||||
- Set Features.WriteMimeType=false as Yandex ignores mime types
|
||||
- Set Features WriteMimeType to false as Yandex ignores mime types
|
||||
(Nick Craig-Wood)
|
||||
|
||||
|
||||
|
||||
Makefile

@@ -93,7 +93,7 @@ build_dep:

# Get the release dependencies we only install on linux
release_dep_linux:
cd /tmp && go get github.com/goreleaser/nfpm/v2/...
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'

# Get the release dependencies we only install on Windows
release_dep_windows:

@@ -4,7 +4,7 @@ This file describes how to make the various kinds of releases

## Extra required software for making a release

* [gh the github cli](https://github.com/cli/cli) for uploading packages
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages

## Making a release

@@ -47,8 +47,7 @@ import (

// The following types of chunks are supported:
// data and control, active and temporary.
// Chunk type is identified by matching chunk file name
// based on the chunk name format configured by user and transaction
// style being used.
// based on the chunk name format configured by user.
//
// Both data and control chunks can be either temporary (aka hidden)
// or active (non-temporary aka normal aka permanent).

@@ -64,12 +63,6 @@ import (

// which is transparently converted to the new format. In its maximum
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
//
// Chunker can tell data chunks from control chunks by the characters
// located in the "hash placeholder" position of configured format.
// Data chunks have decimal digits there.
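//
// As a rough illustration (assuming the default name format of
// "*.rclone_chunk.###" and start_from=1), a composite file "video.avi"
// split into three pieces would be stored on the wrapped remote roughly as:
//   video.avi.rclone_chunk.001
//   video.avi.rclone_chunk.002
//   video.avi.rclone_chunk.003
// plus a small metadata object named "video.avi" when meta_format is
// "simplejson". The exact names depend on the configured format.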
@@ -108,7 +101,7 @@ const maxMetadataSize = 1023
|
||||
const maxMetadataSizeWritten = 255
|
||||
|
||||
// Current/highest supported metadata format.
|
||||
const metadataVersion = 2
|
||||
const metadataVersion = 1
|
||||
|
||||
// optimizeFirstChunk enables the following optimization in the Put:
|
||||
// If a single chunk is expected, put the first chunk using the
|
||||
@@ -231,31 +224,6 @@ It has the following fields: ver, size, nchunks, md5, sha1.`,
|
||||
Help: "Warn user, skip incomplete file and proceed.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "transactions",
|
||||
Advanced: true,
|
||||
Default: "rename",
|
||||
Help: `Choose how chunker should handle temporary files during transactions.`,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "rename",
|
||||
Help: "Rename temporary files after a successful transaction.",
|
||||
}, {
|
||||
Value: "norename",
|
||||
Help: `Leave temporary file names and write transaction ID to metadata file.
|
||||
Metadata is required for no rename transactions (meta format cannot be "none").
|
||||
If you are using norename transactions you should be careful not to downgrade Rclone
|
||||
as older versions of Rclone don't support this transaction style and will misinterpret
|
||||
files manipulated by norename transactions.
|
||||
This method is EXPERIMENTAL, don't use on production systems.`,
|
||||
}, {
|
||||
Value: "auto",
|
||||
Help: `Rename or norename will be used depending on capabilities of the backend.
|
||||
If meta format is set to "none", rename transactions will always be used.
|
||||
This method is EXPERIMENTAL, don't use on production systems.`,
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
}
|
||||
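// As a rough sketch of how the transactions option is used in practice (the
// remote names here are hypothetical), a chunker remote that keeps the
// temporary chunk names instead of renaming them could be configured as:
//
//   [chunked]
//   type = chunker
//   remote = s3:mybucket/path
//   meta_format = simplejson
//   transactions = norename
//
// Note that norename requires metadata (meta_format must not be "none") and
// only applies to rclone builds that include the transactions option.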
@@ -303,7 +271,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
cache.PinUntilFinalized(f.base, f)
|
||||
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
|
||||
|
||||
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
|
||||
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -341,14 +309,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NameFormat string `config:"name_format"`
|
||||
StartFrom int `config:"start_from"`
|
||||
MetaFormat string `config:"meta_format"`
|
||||
HashType string `config:"hash_type"`
|
||||
FailHard bool `config:"fail_hard"`
|
||||
Transactions string `config:"transactions"`
|
||||
Remote string `config:"remote"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NameFormat string `config:"name_format"`
|
||||
StartFrom int `config:"start_from"`
|
||||
MetaFormat string `config:"meta_format"`
|
||||
HashType string `config:"hash_type"`
|
||||
FailHard bool `config:"fail_hard"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@@ -370,13 +337,12 @@ type Fs struct {
|
||||
opt Options // copy of Options
|
||||
features *fs.Features // optional features
|
||||
dirSort bool // reserved for future, ignored
|
||||
useNoRename bool // can be set with the transactions option
|
||||
}
|
||||
|
||||
// configure sets up chunker for given name format, meta format and hash type.
|
||||
// It also seeds the source of random transaction identifiers.
|
||||
// configure must be called only from NewFs or by unit tests.
|
||||
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
|
||||
func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
|
||||
if err := f.setChunkNameFormat(nameFormat); err != nil {
|
||||
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
|
||||
}
|
||||
@@ -386,9 +352,6 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string)
|
||||
if err := f.setHashType(hashType); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.setTransactionMode(transactionMode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
randomSeed := time.Now().UnixNano()
|
||||
f.xactIDRand = rand.New(rand.NewSource(randomSeed))
|
||||
@@ -448,27 +411,6 @@ func (f *Fs) setHashType(hashType string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setTransactionMode(transactionMode string) error {
|
||||
switch transactionMode {
|
||||
case "rename":
|
||||
f.useNoRename = false
|
||||
case "norename":
|
||||
if !f.useMeta {
|
||||
return errors.New("incompatible transaction options")
|
||||
}
|
||||
f.useNoRename = true
|
||||
case "auto":
|
||||
f.useNoRename = !f.CanQuickRename()
|
||||
if f.useNoRename && !f.useMeta {
|
||||
f.useNoRename = false
|
||||
return errors.New("using norename transactions requires metadata")
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported transaction mode '%s'", transactionMode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setChunkNameFormat converts pattern based chunk name format
|
||||
// into Printf format and Regular expressions for data and
|
||||
// control chunks.
|
||||
@@ -751,7 +693,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
byRemote := make(map[string]*Object)
|
||||
badEntry := make(map[string]bool)
|
||||
isSubdir := make(map[string]bool)
|
||||
txnByRemote := map[string]string{}
|
||||
|
||||
var tempEntries fs.DirEntries
|
||||
for _, dirOrObject := range sortedEntries {
|
||||
@@ -764,18 +705,12 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
object := f.newObject("", entry, nil)
|
||||
byRemote[remote] = object
|
||||
tempEntries = append(tempEntries, object)
|
||||
if f.useNoRename {
|
||||
txnByRemote[remote], err = object.readXactID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
// this is some kind of chunk
|
||||
// metobject should have been created above if present
|
||||
isSpecial := xactID != "" || ctrlType != ""
|
||||
mainObject := byRemote[mainRemote]
|
||||
isSpecial := xactID != txnByRemote[mainRemote] || ctrlType != ""
|
||||
if mainObject == nil && f.useMeta && !isSpecial {
|
||||
fs.Debugf(f, "skip orphan data chunk %q", remote)
|
||||
break
|
||||
@@ -874,11 +809,10 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
||||
}
|
||||
|
||||
var (
|
||||
o *Object
|
||||
baseObj fs.Object
|
||||
currentXactID string
|
||||
err error
|
||||
sameMain bool
|
||||
o *Object
|
||||
baseObj fs.Object
|
||||
err error
|
||||
sameMain bool
|
||||
)
|
||||
|
||||
if f.useMeta {
|
||||
@@ -922,14 +856,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
||||
return nil, errors.Wrap(err, "can't detect composite file")
|
||||
}
|
||||
|
||||
if f.useNoRename {
|
||||
currentXactID, err = o.readXactID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
caseInsensitive := f.features.CaseInsensitive
|
||||
|
||||
for _, dirOrObject := range entries {
|
||||
entry, ok := dirOrObject.(fs.Object)
|
||||
if !ok {
|
||||
@@ -951,7 +878,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
||||
if !sameMain {
|
||||
continue // skip alien chunks
|
||||
}
|
||||
if ctrlType != "" || xactID != currentXactID {
|
||||
if ctrlType != "" || xactID != "" {
|
||||
if f.useMeta {
|
||||
// temporary/control chunk calls for lazy metadata read
|
||||
o.unsure = true
|
||||
@@ -1066,57 +993,12 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
||||
}
|
||||
o.md5 = metaInfo.md5
|
||||
o.sha1 = metaInfo.sha1
|
||||
o.xactID = metaInfo.xactID
|
||||
}
|
||||
|
||||
o.isFull = true // cache results
|
||||
o.xIDCached = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// readXactID returns the transaction ID stored in the passed metadata object
|
||||
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
||||
// if xactID has already been read and cached return it now
|
||||
if o.xIDCached {
|
||||
return o.xactID, nil
|
||||
}
|
||||
// Avoid reading metadata for backends that don't use xactID to identify permanent chunks
|
||||
if !o.f.useNoRename {
|
||||
return "", errors.New("readXactID requires norename transactions")
|
||||
}
|
||||
if o.main == nil {
|
||||
return "", errors.New("readXactID requires valid metaobject")
|
||||
}
|
||||
if o.main.Size() > maxMetadataSize {
|
||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||
}
|
||||
reader, err := o.main.Open(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
data, err := ioutil.ReadAll(reader)
|
||||
_ = reader.Close() // ensure file handle is freed on windows
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
switch o.f.opt.MetaFormat {
|
||||
case "simplejson":
|
||||
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||
}
|
||||
var metadata metaSimpleJSON
|
||||
err = json.Unmarshal(data, &metadata)
|
||||
if err != nil {
|
||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||
}
|
||||
xactID = metadata.XactID
|
||||
}
|
||||
o.xactID = xactID
|
||||
o.xIDCached = true
|
||||
return xactID, nil
|
||||
}
|
||||
|
||||
// put implements Put, PutStream, PutUnchecked, Update
|
||||
func (f *Fs) put(
|
||||
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
|
||||
@@ -1269,17 +1151,14 @@ func (f *Fs) put(
|
||||
// If previous object was chunked, remove its chunks
|
||||
f.removeOldChunks(ctx, baseRemote)
|
||||
|
||||
if !f.useNoRename {
|
||||
// The transaction suffix will be removed for backends with quick rename operations
|
||||
for chunkNo, chunk := range c.chunks {
|
||||
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
|
||||
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
|
||||
if errMove != nil {
|
||||
return nil, errMove
|
||||
}
|
||||
c.chunks[chunkNo] = chunkMoved
|
||||
// Rename data chunks from temporary to final names
|
||||
for chunkNo, chunk := range c.chunks {
|
||||
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
|
||||
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
|
||||
if errMove != nil {
|
||||
return nil, errMove
|
||||
}
|
||||
xactID = ""
|
||||
c.chunks[chunkNo] = chunkMoved
|
||||
}
|
||||
|
||||
if !f.useMeta {
|
||||
@@ -1299,7 +1178,7 @@ func (f *Fs) put(
|
||||
switch f.opt.MetaFormat {
|
||||
case "simplejson":
|
||||
c.updateHashes()
|
||||
metadata, err = marshalSimpleJSON(ctx, sizeTotal, len(c.chunks), c.md5, c.sha1, xactID)
|
||||
metadata, err = marshalSimpleJSON(ctx, sizeTotal, len(c.chunks), c.md5, c.sha1)
|
||||
}
|
||||
if err == nil {
|
||||
metaInfo := f.wrapInfo(src, baseRemote, int64(len(metadata)))
|
||||
@@ -1311,7 +1190,6 @@ func (f *Fs) put(
|
||||
|
||||
o := f.newObject("", metaObject, c.chunks)
|
||||
o.size = sizeTotal
|
||||
o.xactID = xactID
|
||||
return o, nil
|
||||
}
|
||||
|
||||
@@ -1715,7 +1593,7 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
|
||||
var metadata []byte
|
||||
switch f.opt.MetaFormat {
|
||||
case "simplejson":
|
||||
metadata, err = marshalSimpleJSON(ctx, newObj.size, len(newChunks), md5, sha1, o.xactID)
|
||||
metadata, err = marshalSimpleJSON(ctx, newObj.size, len(newChunks), md5, sha1)
|
||||
if err == nil {
|
||||
metaInfo := f.wrapInfo(metaObject, "", int64(len(metadata)))
|
||||
err = newObj.main.Update(ctx, bytes.NewReader(metadata), metaInfo)
|
||||
@@ -1931,13 +1809,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||
if entryType == fs.EntryObject {
|
||||
mainPath, _, _, xactID := f.parseChunkName(path)
|
||||
metaXactID := ""
|
||||
if f.useNoRename {
|
||||
metaObject, _ := f.base.NewObject(ctx, mainPath)
|
||||
dummyObject := f.newObject("", metaObject, nil)
|
||||
metaXactID, _ = dummyObject.readXactID(ctx)
|
||||
}
|
||||
if mainPath != "" && xactID == metaXactID {
|
||||
if mainPath != "" && xactID == "" {
|
||||
path = mainPath
|
||||
}
|
||||
}
|
||||
@@ -1958,17 +1830,15 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
|
||||
// Object represents a composite file wrapping one or more data chunks
|
||||
type Object struct {
|
||||
remote string
|
||||
main fs.Object // meta object if file is composite, or wrapped non-chunked file, nil if meta format is 'none'
|
||||
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
|
||||
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
|
||||
isFull bool // true if metadata has been read
|
||||
xIDCached bool // true if xactID has been read
|
||||
unsure bool // true if need to read metadata to detect object type
|
||||
xactID string // transaction ID for "norename" or empty string for "renamed" chunks
|
||||
md5 string
|
||||
sha1 string
|
||||
f *Fs
|
||||
remote string
|
||||
main fs.Object // meta object if file is composite, or wrapped non-chunked file, nil if meta format is 'none'
|
||||
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
|
||||
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
|
||||
isFull bool // true if metadata has been read
|
||||
unsure bool // true if need to read metadata to detect object type
|
||||
md5 string
|
||||
sha1 string
|
||||
f *Fs
|
||||
}
|
||||
|
||||
func (o *Object) addChunk(chunk fs.Object, chunkNo int) error {
|
||||
@@ -2296,7 +2166,6 @@ type ObjectInfo struct {
|
||||
src fs.ObjectInfo
|
||||
fs *Fs
|
||||
nChunks int // number of data chunks
|
||||
xactID string // transaction ID for "norename" or empty string for "renamed" chunks
|
||||
size int64 // overrides source size by the total size of data chunks
|
||||
remote string // overrides remote name
|
||||
md5 string // overrides MD5 checksum
|
||||
@@ -2395,9 +2264,8 @@ type metaSimpleJSON struct {
|
||||
Size *int64 `json:"size"` // total size of data chunks
|
||||
ChunkNum *int `json:"nchunks"` // number of data chunks
|
||||
// optional extra fields
|
||||
MD5 string `json:"md5,omitempty"`
|
||||
SHA1 string `json:"sha1,omitempty"`
|
||||
XactID string `json:"txn,omitempty"` // transaction ID for norename transactions
|
||||
MD5 string `json:"md5,omitempty"`
|
||||
SHA1 string `json:"sha1,omitempty"`
|
||||
}
|
||||
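// As a rough illustration (all field values below are made up), a version 1
// simplejson metadata object serialises to something like:
//   {"ver":1,"size":104857600,"nchunks":2,"md5":"9a0364b9e99bb480dd25e1f0284c8555"}
// while a version 2 object written by norename transactions additionally
// carries the transaction identifier in the txn field:
//   {"ver":2,"size":104857600,"nchunks":2,"md5":"9a0364b9e99bb480dd25e1f0284c8555","txn":"0abc12de"}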
|
||||
// marshalSimpleJSON
|
||||
@@ -2407,20 +2275,16 @@ type metaSimpleJSON struct {
|
||||
// - if file contents can be mistaken as meta object
|
||||
// - if consistent hashing is On but wrapped remote can't provide given hash
|
||||
//
|
||||
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
|
||||
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
|
||||
version := metadataVersion
|
||||
if xactID == "" && version == 2 {
|
||||
version = 1
|
||||
}
|
||||
metadata := metaSimpleJSON{
|
||||
// required core fields
|
||||
Version: &version,
|
||||
Size: &size,
|
||||
ChunkNum: &nChunks,
|
||||
// optional extra fields
|
||||
MD5: md5,
|
||||
SHA1: sha1,
|
||||
XactID: xactID,
|
||||
MD5: md5,
|
||||
SHA1: sha1,
|
||||
}
|
||||
data, err := json.Marshal(&metadata)
|
||||
if err == nil && data != nil && len(data) >= maxMetadataSizeWritten {
|
||||
@@ -2498,7 +2362,6 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
|
||||
info.nChunks = *metadata.ChunkNum
|
||||
info.md5 = metadata.MD5
|
||||
info.sha1 = metadata.SHA1
|
||||
info.xactID = metadata.XactID
|
||||
return info, true, nil
|
||||
}
|
||||
|
||||
@@ -2531,11 +2394,6 @@ func (f *Fs) Precision() time.Duration {
|
||||
return f.base.Precision()
|
||||
}
|
||||
|
||||
// CanQuickRename returns true if the Fs supports a quick rename operation
|
||||
func (f *Fs) CanQuickRename() bool {
|
||||
return f.base.Features().Move != nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
|
||||
@@ -468,15 +468,9 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
return obj
|
||||
}
|
||||
billyObj := newFile("billy")
|
||||
billyTxn := billyObj.(*Object).xactID
|
||||
if f.useNoRename {
|
||||
require.True(t, billyTxn != "")
|
||||
} else {
|
||||
require.True(t, billyTxn == "")
|
||||
}
|
||||
|
||||
billyChunkName := func(chunkNo int) string {
|
||||
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
|
||||
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
|
||||
}
|
||||
|
||||
err := f.Mkdir(ctx, billyChunkName(1))
|
||||
@@ -493,8 +487,6 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
// accessing chunks in strict mode is prohibited
|
||||
f.opt.FailHard = true
|
||||
billyChunk4Name := billyChunkName(4)
|
||||
_, err = f.base.NewObject(ctx, billyChunk4Name)
|
||||
require.NoError(t, err)
|
||||
_, err = f.NewObject(ctx, billyChunk4Name)
|
||||
assertOverlapError(err)
|
||||
|
||||
@@ -528,8 +520,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
|
||||
// recreate billy in case it was anyhow corrupted
|
||||
willyObj := newFile("willy")
|
||||
willyTxn := willyObj.(*Object).xactID
|
||||
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
|
||||
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
|
||||
f.opt.FailHard = false
|
||||
willyChunk, err := f.NewObject(ctx, willyChunkName)
|
||||
f.opt.FailHard = true
|
||||
@@ -570,20 +561,17 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
contents := random.String(100)
|
||||
|
||||
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
|
||||
filename = path.Join(dir, name)
|
||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
|
||||
txnID = chunkObj.xactID
|
||||
}
|
||||
return
|
||||
return obj, filename
|
||||
}
|
||||
|
||||
f.opt.FailHard = false
|
||||
file, fileName, fileTxn := newFile(f, "wreaker")
|
||||
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
|
||||
file, fileName := newFile(f, "wreaker")
|
||||
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
|
||||
|
||||
f.opt.FailHard = false
|
||||
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
|
||||
@@ -662,7 +650,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
||||
}
|
||||
}
|
||||
|
||||
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
|
||||
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
|
||||
require.NoError(t, err)
|
||||
todaysMeta := string(metaData)
|
||||
runSubtest(todaysMeta, "today")
|
||||
@@ -676,7 +664,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
||||
runSubtest(futureMeta, "future")
|
||||
}
|
||||
|
||||
// Test that chunker refuses to change on objects with future/unknown metadata
|
||||
// test that chunker refuses to change on objects with future/unknowm metadata
|
||||
func testFutureProof(t *testing.T, f *Fs) {
|
||||
if f.opt.MetaFormat == "none" {
|
||||
t.Skip("this test requires metadata support")
|
||||
@@ -750,100 +738,6 @@ func testFutureProof(t *testing.T, f *Fs) {
|
||||
}
|
||||
}
|
||||
|
||||
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
|
||||
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
|
||||
func testBackwardsCompatibility(t *testing.T, f *Fs) {
|
||||
if !f.useMeta {
|
||||
t.Skip("Can't do norename transactions without metadata")
|
||||
}
|
||||
const dir = "backcomp"
|
||||
ctx := context.Background()
|
||||
saveOpt := f.opt
|
||||
saveUseNoRename := f.useNoRename
|
||||
defer func() {
|
||||
f.opt.FailHard = false
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
f.opt = saveOpt
|
||||
f.useNoRename = saveUseNoRename
|
||||
}()
|
||||
f.opt.ChunkSize = fs.SizeSuffix(10)
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
contents := random.String(250)
|
||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
return obj, filename
|
||||
}
|
||||
|
||||
f.opt.FailHard = false
|
||||
f.useNoRename = false
|
||||
file, fileName := newFile(f, "renamefile")
|
||||
|
||||
f.opt.FailHard = false
|
||||
item := fstest.NewItem(fileName, contents, modTime)
|
||||
|
||||
var items []fstest.Item
|
||||
items = append(items, item)
|
||||
|
||||
f.useNoRename = true
|
||||
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
|
||||
_, err := f.NewObject(ctx, fileName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
f.opt.FailHard = true
|
||||
_, err = f.List(ctx, dir)
|
||||
assert.NoError(t, err)
|
||||
|
||||
f.opt.FailHard = false
|
||||
_ = file.Remove(ctx)
|
||||
}
|
||||
|
||||
func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
||||
if !f.useMeta {
|
||||
t.Skip("Can't test norename transactions without metadata")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
const dir = "servermovetest"
|
||||
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
|
||||
|
||||
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
|
||||
assert.NoError(t, err)
|
||||
fs1, isChunkerFs := subFs1.(*Fs)
|
||||
assert.True(t, isChunkerFs)
|
||||
fs1.useNoRename = false
|
||||
fs1.opt.ChunkSize = fs.SizeSuffix(3)
|
||||
|
||||
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
|
||||
assert.NoError(t, err)
|
||||
fs2, isChunkerFs := subFs2.(*Fs)
|
||||
assert.True(t, isChunkerFs)
|
||||
fs2.useNoRename = true
|
||||
fs2.opt.ChunkSize = fs.SizeSuffix(3)
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
||||
contents := "abcdef"
|
||||
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
||||
|
||||
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
||||
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(len(contents)), dstFile.Size())
|
||||
|
||||
r, err := dstFile.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
data, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, contents, string(data))
|
||||
_ = r.Close()
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PutLarge", func(t *testing.T) {
|
||||
@@ -870,12 +764,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("FutureProof", func(t *testing.T) {
|
||||
testFutureProof(t, f)
|
||||
})
|
||||
t.Run("BackwardsCompatibility", func(t *testing.T) {
|
||||
testBackwardsCompatibility(t, f)
|
||||
})
|
||||
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
||||
testChunkerServerSideMove(t, f)
|
||||
})
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -101,21 +101,6 @@ names, or for debugging purposes.`,
|
||||
Default: false,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_data_encryption",
|
||||
Help: "Option to either encrypt file data or leave it unencrypted.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "true",
|
||||
Help: "Don't encrypt file data, leave it unencrypted.",
|
||||
},
|
||||
{
|
||||
Value: "false",
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -224,7 +209,6 @@ type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
FilenameEncryption string `config:"filename_encryption"`
|
||||
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
||||
NoDataEncryption bool `config:"no_data_encryption"`
|
||||
Password string `config:"password"`
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
@@ -362,10 +346,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
||||
|
||||
// put implements Put or PutStream
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||
if f.opt.NoDataEncryption {
|
||||
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||
}
|
||||
|
||||
// Encrypt the data into wrappedIn
|
||||
wrappedIn, encrypter, err := f.cipher.encryptData(in)
|
||||
if err != nil {
|
||||
@@ -637,10 +617,6 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
if f.opt.NoDataEncryption {
|
||||
return src.Hash(ctx, hashType)
|
||||
}
|
||||
|
||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||
// use a limited read so we only read the header
|
||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||
@@ -846,13 +822,9 @@ func (o *Object) Remote() string {
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
size := o.Object.Size()
|
||||
if !o.f.opt.NoDataEncryption {
|
||||
var err error
|
||||
size, err = o.f.cipher.DecryptedSize(size)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||
}
|
||||
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||
}
|
||||
return size
|
||||
}
|
||||
@@ -870,10 +842,6 @@ func (o *Object) UnWrap() fs.Object {
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
if o.f.opt.NoDataEncryption {
|
||||
return o.Object.Open(ctx, options...)
|
||||
}
|
||||
|
||||
var openOptions []fs.OpenOption
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
|
||||
@@ -91,26 +91,3 @@ func TestObfuscate(t *testing.T) {
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
// TestNoDataObfuscate runs integration tests against the remote
|
||||
func TestNoDataObfuscate(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt4"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||
{Name: name, Key: "no_data_encryption", Value: "true"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3427,10 +3427,11 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
|
||||
if url == "" {
|
||||
return nil, nil, errors.New("forbidden to download - check sharing permission")
|
||||
}
|
||||
req, err = http.NewRequestWithContext(ctx, method, url, nil)
|
||||
req, err = http.NewRequest(method, url, nil)
|
||||
if err != nil {
|
||||
return req, nil, err
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||
if o.bytes == 0 {
|
||||
// Don't supply range requests for 0 length objects as they always fail
|
||||
|
||||
@@ -77,10 +77,11 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
|
||||
return false, err
|
||||
}
|
||||
var req *http.Request
|
||||
req, err = http.NewRequestWithContext(ctx, method, urls, body)
|
||||
req, err = http.NewRequest(method, urls, body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
googleapi.Expand(req.URL, map[string]string{
|
||||
"fileId": fileID,
|
||||
})
|
||||
@@ -113,7 +114,8 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
|
||||
|
||||
// Make an http.Request for the range passed in
|
||||
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
|
||||
req, _ := http.NewRequest("POST", rx.URI, body)
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
req.ContentLength = reqSize
|
||||
totalSize := "*"
|
||||
if rx.ContentLength >= 0 {
|
||||
|
||||
@@ -219,11 +219,11 @@ shared folder.`,
|
||||
// as invalid characters.
|
||||
// Testing revealed names with trailing spaces and the DEL character don't work.
|
||||
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
|
||||
Default: encoder.Base |
|
||||
Default: (encoder.Base |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8,
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
@@ -242,10 +242,8 @@ type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
ci *fs.ConfigInfo // global config
|
||||
features *fs.Features // optional features
|
||||
srv files.Client // the connection to the dropbox server
|
||||
svc files.Client // the connection to the dropbox server (unauthorized)
|
||||
sharing sharing.Client // as above, but for generating sharing links
|
||||
users users.Client // as above, but for accessing user information
|
||||
team team.Client // for the Teams API
|
||||
@@ -369,29 +367,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
cfg := dropbox.Config{
|
||||
config := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
Client: oAuthClient, // maybe???
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
|
||||
// unauthorized config for endpoints that fail with auth
|
||||
ucfg := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
}
|
||||
|
||||
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
|
||||
f.team = team.New(cfg)
|
||||
f.team = team.New(config)
|
||||
|
||||
if opt.Impersonate != "" {
|
||||
|
||||
user := team.UserSelectorArg{
|
||||
Email: opt.Impersonate,
|
||||
}
|
||||
@@ -406,13 +397,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
}
|
||||
|
||||
f.srv = files.New(cfg)
|
||||
f.svc = files.New(ucfg)
|
||||
f.sharing = sharing.New(cfg)
|
||||
f.users = users.New(cfg)
|
||||
f.srv = files.New(config)
|
||||
f.sharing = sharing.New(config)
|
||||
f.users = users.New(config)
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: false,
|
||||
@@ -671,7 +661,7 @@ func (f *Fs) findSharedFolder(name string) (id string, err error) {
|
||||
return "", fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
// mountSharedFolder mount a shared folder to the root namespace
|
||||
// mountSharedFolders mount a shared folder to the root namespace
|
||||
func (f *Fs) mountSharedFolder(id string) error {
|
||||
arg := sharing.MountFolderArg{
|
||||
SharedFolderId: id,
|
||||
@@ -683,7 +673,7 @@ func (f *Fs) mountSharedFolder(id string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// listReceivedFiles lists shared the user as access to (note this means individual
|
||||
// listSharedFolders lists shared the user as access to (note this means individual
|
||||
// files not files contained in shared folders)
|
||||
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
|
||||
started := false
|
||||
@@ -1201,159 +1191,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
// If the implementation uses polling, it should adhere to the given interval.
|
||||
//
|
||||
// Automatically restarts itself in case of unexpected behavior of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
go func() {
|
||||
// get the StartCursor early so all changes from now on get processed
|
||||
startCursor, err := f.changeNotifyCursor()
|
||||
if err != nil {
|
||||
fs.Infof(f, "Failed to get StartCursor: %s", err)
|
||||
}
|
||||
var ticker *time.Ticker
|
||||
var tickerC <-chan time.Time
|
||||
for {
|
||||
select {
|
||||
case pollInterval, ok := <-pollIntervalChan:
|
||||
if !ok {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
ticker, tickerC = nil, nil
|
||||
}
|
||||
if pollInterval != 0 {
|
||||
ticker = time.NewTicker(pollInterval)
|
||||
tickerC = ticker.C
|
||||
}
|
||||
case <-tickerC:
|
||||
if startCursor == "" {
|
||||
startCursor, err = f.changeNotifyCursor()
|
||||
if err != nil {
|
||||
fs.Infof(f, "Failed to get StartCursor: %s", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
fs.Debugf(f, "Checking for changes on remote")
|
||||
startCursor, err = f.changeNotifyRunner(ctx, notifyFunc, startCursor)
|
||||
if err != nil {
|
||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyCursor() (cursor string, err error) {
|
||||
var startCursor *files.ListFolderGetLatestCursorResult
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
|
||||
Recursive: true,
|
||||
}
|
||||
|
||||
if arg.Path == "/" {
|
||||
arg.Path = ""
|
||||
}
|
||||
|
||||
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
|
||||
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return startCursor.Cursor, nil
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startCursor string) (newCursor string, err error) {
|
||||
cursor := startCursor
|
||||
var res *files.ListFolderLongpollResult
|
||||
|
||||
// Dropbox sets a timeout range of 30 - 480
|
||||
timeout := uint64(f.ci.TimeoutOrInfinite() / time.Second)
|
||||
|
||||
if timeout < 30 {
|
||||
timeout = 30
|
||||
}
|
||||
|
||||
if timeout > 480 {
|
||||
timeout = 480
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
args := files.ListFolderLongpollArg{
|
||||
Cursor: cursor,
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
res, err = f.svc.ListFolderLongpoll(&args)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !res.Changes {
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
if res.Backoff != 0 {
|
||||
fs.Debugf(f, "Waiting to poll for %d seconds", res.Backoff)
|
||||
time.Sleep(time.Duration(res.Backoff) * time.Second)
|
||||
}
|
||||
|
||||
for {
|
||||
var changeList *files.ListFolderResult
|
||||
|
||||
arg := files.ListFolderContinueArg{
|
||||
Cursor: cursor,
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
changeList, err = f.srv.ListFolderContinue(&arg)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "list continue")
|
||||
}
|
||||
cursor = changeList.Cursor
|
||||
var entryType fs.EntryType
|
||||
for _, entry := range changeList.Entries {
|
||||
entryPath := ""
|
||||
switch info := entry.(type) {
|
||||
case *files.FolderMetadata:
|
||||
entryType = fs.EntryDirectory
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
case *files.FileMetadata:
|
||||
entryType = fs.EntryObject
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
case *files.DeletedMetadata:
|
||||
entryType = fs.EntryObject
|
||||
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
|
||||
default:
|
||||
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
|
||||
continue
|
||||
}
|
||||
|
||||
if entryPath != "" {
|
||||
notifyFunc(entryPath, entryType)
|
||||
}
|
||||
}
|
||||
if !changeList.HasMore {
|
||||
break
|
||||
}
|
||||
}
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(DbHashType)
|
||||
|
||||
@@ -48,41 +48,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}

func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}

var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
}

return &file, err
}

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
@@ -343,56 +308,6 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}

func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}

response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
}

func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}

opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}

response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
}

func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

@@ -363,6 +363,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
@@ -415,79 +416,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}

// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}

// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)

@@ -72,10 +72,6 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}

func (o *Object) setMetaData(file File) {
o.file = file
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)

@@ -1,10 +1,5 @@
package fichier

// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
URL string `json:"url"`
}

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -54,39 +49,6 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}

// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"destination_folder_id"`
Rename string `json:"rename,omitempty"`
}

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"folder_id"`
Rename string `json:"rename,omitempty"`
}

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -124,6 +86,7 @@ type EndFileUploadResponse struct {

// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`

@@ -5,7 +5,6 @@ import (
"context"
"crypto/tls"
"io"
"net"
"net/textproto"
"path"
"runtime"
@@ -21,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -93,17 +91,6 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -131,7 +118,6 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -148,9 +134,7 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
}

// Object describes an FTP file
@@ -227,36 +211,25 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

type dialCtx struct {
f *Fs
ctx context.Context
}

// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}

// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
if f.opt.TLS && f.opt.ExplicitTLS {
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
} else if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -335,32 +308,9 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}

// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.Quit(); cErr != nil {
err = cErr
}
f.pool[i] = nil
}
f.pool = nil
return err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
@@ -388,16 +338,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS {
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -410,15 +350,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
@@ -447,12 +382,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
return f, err
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}

// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
@@ -598,7 +527,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}()

// Wait for List for up to Timeout seconds
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
timer := time.NewTimer(f.ci.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
@@ -1061,6 +990,5 @@ var (
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
)

@@ -1028,10 +1028,11 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response

@@ -109,7 +109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)

err := o.fs.client.MkdirAll(dirname, 755)
err := o.fs.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}

@@ -183,8 +183,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
req, err := http.NewRequest("HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -390,10 +391,11 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -545,10 +547,11 @@ func (o *Object) stat(ctx context.Context) error {
return nil
}
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -593,10 +596,11 @@ func (o *Object) Storable() bool {
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {

@@ -5,7 +5,7 @@ import (
"net/http"
"time"

"github.com/ncw/swift/v2"
"github.com/ncw/swift"
"github.com/rclone/rclone/fs"
)

@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Reques
}

// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
func (a *auth) Response(resp *http.Response) error {
return nil
}

@@ -16,7 +16,7 @@ import (
"strings"
"time"

swiftLib "github.com/ncw/swift/v2"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
@@ -110,10 +110,11 @@ func (f *Fs) String() string {
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -163,7 +164,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
err = c.Authenticate()
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}

@@ -148,17 +148,6 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
@@ -202,7 +191,6 @@ type Options struct {
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
@@ -1139,12 +1127,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
}
if !o.fs.opt.NoPreAllocate {
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
@@ -1231,11 +1217,9 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
if !f.opt.NoPreAllocate {
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
if !f.opt.NoSparse && file.SetSparseImplemented {
sparseWarning.Do(func() {

@@ -1088,7 +1088,7 @@ func (f *Fs) Precision() time.Duration {

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
deadline := time.Now().Add(f.ci.Timeout)
for time.Now().Before(deadline) {
var resp *http.Response
var err error
@@ -1126,7 +1126,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}

// Copy src to this remote using server-side copy operations.

@@ -282,10 +282,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.put.io/files/", nil)
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))
@@ -427,19 +428,21 @@ func (f *Fs) transferChunk(ctx context.Context, location string, start int64, ch
}

func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, "HEAD", location, nil)
req, err := http.NewRequest("HEAD", location, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
}

func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, "PATCH", location, in)
req, err := http.NewRequest("PATCH", location, in)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
req.Header.Set("content-length", strconv.FormatInt(length, 10))

@@ -229,10 +229,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
headers := fs.OpenOptionHeaders(options)
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
req, err := http.NewRequest(http.MethodGet, storageURL, nil)
if err != nil {
return shouldRetry(err)
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("User-Agent", o.fs.client.UserAgent)

// merge headers with extra headers

@@ -33,7 +33,7 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ncw/swift/v2"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -3171,25 +3171,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)

// Metadata for upload
metadata := map[string]*string{}
fs.Debugf(o, "src = %#v", src)

// Read metadata from source s3 object if available
srcObj, ok := fs.UnWrapObjectInfo(src).(*Object)
if ok {
fs.Debugf(o, "Reading metadata from %v", srcObj)
err := srcObj.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return err
}
for k, v := range srcObj.meta {
metadata[k] = v
}
}

// Set the mtime in the meta data
metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}

// read the md5sum if available
// - for non multipart
@@ -3308,10 +3293,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// create the vanilla http request
httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
httpReq, err := http.NewRequest("PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext

// set the headers we signed and the length
httpReq.Header = headers

@@ -204,17 +204,6 @@ Fstat instead of Stat which is called on an already open file handle.
It has been found that this helps with IBM Sterling SFTP servers which have
"extractability" level set to 1 which means only 1 file can be opened at
any given time.
`,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}},
@@ -224,28 +213,27 @@ Set to 0 to keep connections indefinitely.

// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
}

// Fs stores the interface to the remote SFTP files
@@ -263,8 +251,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations
pacer *fs.Pacer // pacer for operations
savedpswd string
}

@@ -441,9 +428,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}

@@ -451,12 +435,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.closed(); cErr == nil {
cErr = c.close()
@@ -689,10 +667,6 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
f.mkdirLock = newStringLock()
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
f.savedpswd = ""
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,

@@ -1,11 +1,10 @@
package swift

import (
"context"
"net/http"
"time"

"github.com/ncw/swift/v2"
"github.com/ncw/swift"
)

// auth is an authenticator for swift. It overrides the StorageUrl
@@ -29,19 +28,19 @@ func newAuth(parentAuth swift.Authenticator, storageURL string, authToken string
}

// Request creates an http.Request for the auth - return nil if not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (*http.Request, error) {
func (a *auth) Request(c *swift.Connection) (*http.Request, error) {
if a.parentAuth == nil {
return nil, nil
}
return a.parentAuth.Request(ctx, c)
return a.parentAuth.Request(c)
}

// Response parses the http.Response
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
func (a *auth) Response(resp *http.Response) error {
if a.parentAuth == nil {
return nil
}
return a.parentAuth.Response(ctx, resp)
return a.parentAuth.Response(resp)
}

// The public storage URL - set Internal to true to read

@@ -13,7 +13,7 @@ import (
"strings"
"time"

"github.com/ncw/swift/v2"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -391,7 +391,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
}
err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
if err != nil {
return nil, err
}
@@ -467,7 +467,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
@@ -506,7 +506,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -516,7 +516,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
// making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
err := o.readMetaData(ctx) // reads info and headers, returning an error
err := o.readMetaData() // reads info and headers, returning an error
if err == fs.ErrorObjectNotFound {
// We have a dangling large object here so just return the original metadata
fs.Errorf(o, "dangling large object with no contents")
@@ -533,7 +533,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
return nil, err
}
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
}
@@ -544,7 +544,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list and listContainerRoot to handle an object.
@@ -556,7 +556,7 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
@@ -571,11 +571,11 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(ctx, container, opts)
objects, err = f.c.Objects(container, opts)
return shouldRetry(err)
})
if err == nil {
@@ -613,8 +613,8 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
if isDirectory {
remote = strings.TrimRight(remote, "/")
d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -622,7 +622,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
} else {
// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
var o fs.Object
o, err = f.newObjectWithInfo(ctx, remote, object)
o, err = f.newObjectWithInfo(remote, object)
if err != nil {
return err
}
@@ -639,12 +639,12 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
err = f.list(container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
})
@@ -660,7 +660,7 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
@@ -691,7 +691,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return f.listContainers(ctx)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
}

// ListR lists the objects and directories of the Fs starting
@@ -714,7 +714,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)
})
}
@@ -752,7 +752,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
containers, err = f.c.ContainersAll(nil)
return shouldRetry(err)
})
if err != nil {
@@ -804,7 +804,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(ctx, container)
_, rxHeaders, err = f.c.Container(container)
return shouldRetryHeaders(rxHeaders, err)
})
}
@@ -814,7 +814,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(ctx, container, headers)
err = f.c.ContainerCreate(container, headers)
return shouldRetry(err)
})
if err == nil {
@@ -835,7 +835,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(ctx, container)
err := f.c.ContainerDelete(container)
return shouldRetry(err)
})
if err == nil {
@@ -865,7 +865,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
@@ -905,7 +905,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -944,11 +944,11 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return "", err
}
isStaticLargeObject, err := o.isStaticLargeObject(ctx)
isStaticLargeObject, err := o.isStaticLargeObject()
if err != nil {
return "", err
}
@@ -961,8 +961,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
err := o.readMetaData(ctx)
func (o *Object) hasHeader(header string) (bool, error) {
err := o.readMetaData()
if err != nil {
if err == fs.ErrorObjectNotFound {
return false, nil
@@ -974,29 +974,29 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
}

// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Object-Manifest")
func (o *Object) isDynamicLargeObject() (bool, error) {
return o.hasHeader("X-Object-Manifest")
}

// isStaticLargeObjectFile checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Static-Large-Object")
func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}

func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
func (o *Object) isLargeObject() (result bool, err error) {
result, err = o.hasHeader("X-Static-Large-Object")
if result {
return
}
result, err = o.hasHeader(ctx, "X-Object-Manifest")
result, err = o.hasHeader("X-Object-Manifest")
if result {
return
}
return false, nil
}

func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
_, headers, err := o.fs.c.Container(ctx, container)
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
if err != nil {
return false, err
}
@@ -1032,7 +1032,7 @@ func (o *Object) decodeMetaData(info *swift.Object) (err error) {
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.headers != nil {
return nil
}
@@ -1040,7 +1040,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var h swift.Headers
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(ctx, container, containerPath)
info, h, err = o.fs.c.Object(container, containerPath)
return shouldRetryHeaders(h, err)
})
if err != nil {
@@ -1066,7 +1066,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.lastModified
@@ -1081,7 +1081,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
return err
}
@@ -1099,7 +1099,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
return shouldRetry(err)
})
}
@@ -1120,7 +1120,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
return shouldRetryHeaders(rxHeaders, err)
})
return
@@ -1134,9 +1134,9 @@ func min(x, y int64) int64 {
return y
}

func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
func (o *Object) getSegmentsLargeObject() (map[string][]string, error) {
container, objectName := o.split()
segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(container, objectName)
if err != nil {
fs.Debugf(o, "Failed to get list segments of object: %v", err)
return nil, err
@@ -1153,12 +1153,12 @@ func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]strin
return containerSegments, nil
}

func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
func (o *Object) removeSegmentsLargeObject(containerSegments map[string][]string) error {
if containerSegments == nil || len(containerSegments) <= 0 {
return nil
}
for container, segments := range containerSegments {
_, err := o.fs.c.BulkDelete(ctx, container, segments)
_, err := o.fs.c.BulkDelete(container, segments)
if err != nil {
fs.Debugf(o, "Failed to delete bulk segments %v", err)
return err
@@ -1167,8 +1167,8 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
return nil
}

func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(ctx); err != nil {
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
@@ -1203,14 +1203,14 @@ func urlEncode(str string) string {

// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
// Create the segmentsContainer if it doesn't exist
var err error
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
@@ -1219,7 +1219,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
err = o.fs.c.ContainerCreate(segmentsContainer, headers)
return shouldRetry(err)
})
}
@@ -1241,7 +1241,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
deleteChunks(ctx, o, segmentsContainer, segmentInfos)
deleteChunks(o, segmentsContainer, segmentInfos)
})()
for {
// can we read at least one byte?
@@ -1263,7 +1263,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
@@ -1280,7 +1280,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
emptyReader := bytes.NewReader(nil)
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})

@@ -1291,13 +1291,13 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
return uniquePrefix + "/", err
}

func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
e := o.fs.c.ObjectDelete(segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
}
@@ -1320,7 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
modTime := src.ModTime(ctx)

// Note whether this is a dynamic large object before starting
isLargeObject, err := o.isLargeObject(ctx)
isLargeObject, err := o.isLargeObject()
if err != nil {
return err
}
@@ -1328,7 +1328,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
//capture segments before upload
var segmentsContainer map[string][]string
if isLargeObject {
segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
segmentsContainer, _ = o.getSegmentsLargeObject()
}

// Set the mtime
@@ -1339,7 +1339,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.OpenOptionAddHeaders(options, headers)

if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
_, err = o.updateChunks(ctx, in, headers, size, contentType)
_, err = o.updateChunks(in, headers, size, contentType)
if err != nil {
return err
}
@@ -1355,7 +1355,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -1373,17 +1373,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.size = int64(inCount.BytesRead())
}
}
isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
isInContainerVersioning, _ := o.isInContainerVersioning(container)
// If file was a large object and the container is not enable versioning then remove old/all segments
if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
err := o.removeSegmentsLargeObject(segmentsContainer)
if err != nil {
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}

// Read the metadata from the newly created object if necessary
return o.readMetaData(ctx)
return o.readMetaData()
}

// Remove an object
@@ -1391,14 +1391,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
container, containerPath := o.split()

//check object is large object
isLargeObject, err := o.isLargeObject(ctx)
isLargeObject, err := o.isLargeObject()
if err != nil {
return err
}
//check container has enabled version to reserve segment when delete
isInContainerVersioning := false
if isLargeObject {
isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
isInContainerVersioning, err = o.isInContainerVersioning(container)
if err != nil {
return err
}
@@ -1406,14 +1406,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
//capture segments object if this object is large object
var containerSegments map[string][]string
if isLargeObject {
containerSegments, err = o.getSegmentsLargeObject(ctx)
containerSegments, err = o.getSegmentsLargeObject()
if err != nil {
return err
}
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
err = o.fs.c.ObjectDelete(container, containerPath)
return shouldRetry(err)
})
if err != nil {
@@ -1425,7 +1425,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}

if isLargeObject {
return o.removeSegmentsLargeObject(ctx, containerSegments)
return o.removeSegmentsLargeObject(containerSegments)
}
return nil
}

@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/ncw/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
@@ -144,10 +144,10 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
|
||||
// error is potato
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, errMessage, err.Error())
|
||||
_, _, err = f.c.Object(ctx, f.rootContainer, path)
|
||||
_, _, err = f.c.Object(f.rootContainer, path)
|
||||
assert.Equal(t, swift.ObjectNotFound, err)
|
||||
prefix := path
|
||||
objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{
|
||||
objs, err := f.c.Objects(segmentContainer, &swift.ObjectsOpts{
|
||||
Prefix: prefix,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build !plan9
|
||||
// +build go1.13,!plan9
|
||||
|
||||
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
|
||||
package tardigrade
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build !plan9
|
||||
// +build go1.13,!plan9
|
||||
|
||||
package tardigrade
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build !plan9
|
||||
// +build go1.13,!plan9
|
||||
|
||||
// Test Tardigrade filesystem interface
|
||||
package tardigrade_test
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
// +build plan9
|
||||
// +build !go1.13 plan9
|
||||
|
||||
package tardigrade
|
||||
|
||||
@@ -176,10 +176,11 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
|
||||
|
||||
// Create and execute the first request which returns an auth token for the sharepoint service
|
||||
// With this token we can authenticate on the login page and save the returned cookies
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", "https://login.microsoftonline.com/extSTS.srf", buf)
|
||||
req, err := http.NewRequest("POST", "https://login.microsoftonline.com/extSTS.srf", buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
resp, err := client.Do(req)
|
||||
|
||||
@@ -10,7 +10,6 @@ package webdav
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -20,25 +19,20 @@ import (
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/webdav/api"
|
||||
"github.com/rclone/rclone/backend/webdav/odrvcookie"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
|
||||
ntlmssp "github.com/Azure/go-ntlmssp"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -48,22 +42,8 @@ const (
|
||||
defaultDepth = "1" // depth for PROPFIND
|
||||
)
|
||||
|
||||
const defaultEncodingSharepointNTLM = (encoder.EncodeWin |
|
||||
encoder.EncodeHashPercent | // required by IIS/8.5 in contrast with onedrive which doesn't need it
|
||||
(encoder.Display &^ encoder.EncodeDot) | // test with IIS/8.5 shows that EncodeDot is not needed
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftTilde |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
configEncodingHelp := fmt.Sprintf(
|
||||
"%s\n\nDefault encoding is %s for sharepoint-ntlm or identity otherwise.",
|
||||
config.ConfigEncodingHelp, defaultEncodingSharepointNTLM)
|
||||
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "webdav",
|
||||
Description: "Webdav",
|
||||
@@ -87,17 +67,14 @@ func init() {
|
||||
Help: "Owncloud",
|
||||
}, {
|
||||
Value: "sharepoint",
|
||||
Help: "Sharepoint Online, authenticated by Microsoft account.",
|
||||
}, {
|
||||
Value: "sharepoint-ntlm",
|
||||
Help: "Sharepoint with NTLM authentication. Usually self-hosted or on-premises.",
|
||||
Help: "Sharepoint",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Other site/service or software",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name. In case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
|
||||
Help: "User name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
@@ -109,23 +86,18 @@ func init() {
|
||||
Name: "bearer_token_command",
|
||||
Help: "Command to run to get a bearer token",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: configEncodingHelp,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
URL string `config:"url"`
|
||||
Vendor string `config:"vendor"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
BearerToken string `config:"bearer_token"`
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
URL string `config:"url"`
|
||||
Vendor string `config:"vendor"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
BearerToken string `config:"bearer_token"`
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -142,10 +114,8 @@ type Fs struct {
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
|
||||
checkBeforePurge bool // enables extra check that directory to purge really exists
|
||||
hasMD5 bool // set if can use owncloud style checksums for MD5
|
||||
hasSHA1 bool // set if can use owncloud style checksums for SHA1
|
||||
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -209,22 +179,6 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// safeRoundTripper is a wrapper for http.RoundTripper that serializes
|
||||
// http roundtrips. NTLM authentication sequence can involve up to four
|
||||
// rounds of negotiations and might fail due to concurrency.
|
||||
// This wrapper allows to use ntlmssp.Negotiator safely with goroutines.
|
||||
type safeRoundTripper struct {
|
||||
fs *Fs
|
||||
rt http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip guards wrapped RoundTripper by a mutex.
|
||||
func (srt *safeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
srt.fs.ntlmAuthMu.Lock()
|
||||
defer srt.fs.ntlmAuthMu.Unlock()
|
||||
return srt.rt.RoundTrip(req)
|
||||
}
|
||||
|
||||
// itemIsDir returns true if the item is a directory
|
||||
//
|
||||
// When a client sees a resourcetype it doesn't recognize it should
|
||||
@@ -331,11 +285,7 @@ func addSlash(s string) string {
|
||||
|
||||
// filePath returns a file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
subPath := path.Join(f.root, file)
|
||||
if f.opt.Enc != encoder.EncodeZero {
|
||||
subPath = f.opt.Enc.FromStandardPath(subPath)
|
||||
}
|
||||
return rest.URLPathEscape(subPath)
|
||||
return rest.URLPathEscape(path.Join(f.root, file))
|
||||
}
|
||||
|
||||
// dirPath returns a directory path (f.root, dir)
|
||||
@@ -374,10 +324,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
if opt.Enc == encoder.EncodeZero && opt.Vendor == "sharepoint-ntlm" {
|
||||
opt.Enc = defaultEncodingSharepointNTLM
|
||||
}
|
||||
|
||||
// Parse the endpoint
|
||||
u, err := url.Parse(opt.URL)
|
||||
if err != nil {
|
||||
@@ -390,28 +336,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt: *opt,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
precision: fs.ModTimeNotSupported,
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
if opt.Vendor == "sharepoint-ntlm" {
|
||||
// Disable transparent HTTP/2 support as per https://golang.org/pkg/net/http/ ,
|
||||
// otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED'
|
||||
// https://docs.microsoft.com/en-us/iis/get-started/whats-new-in-iis-10/http2-on-iis says:
|
||||
// 'Windows authentication (NTLM/Kerberos/Negotiate) is not supported with HTTP/2.'
|
||||
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
|
||||
})
|
||||
|
||||
// Add NTLM layer
|
||||
client.Transport = &safeRoundTripper{
|
||||
fs: f,
|
||||
rt: ntlmssp.Negotiator{RoundTripper: t},
|
||||
}
|
||||
}
|
||||
f.srv = rest.NewClient(client).SetRoot(u.String())
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
@@ -537,16 +465,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
// to determine if we may have found a file, the request has to be resent
|
||||
// with the depth set to 0
|
||||
f.retryWithZeroDepth = true
|
||||
case "sharepoint-ntlm":
|
||||
// Sharepoint with NTLM authentication
|
||||
// See comment above
|
||||
f.retryWithZeroDepth = true
|
||||
|
||||
// Sharepoint 2016 returns status 204 to the purge request
|
||||
// even if the directory to purge does not really exist
|
||||
// so we must perform an extra check to detect this
|
||||
// condition and return a proper error code.
|
||||
f.checkBeforePurge = true
|
||||
case "other":
|
||||
default:
|
||||
fs.Debugf(f, "Unknown vendor %q", vendor)
|
||||
@@ -665,11 +583,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
|
||||
continue
|
||||
}
|
||||
subPath := u.Path[len(baseURL.Path):]
|
||||
if f.opt.Enc != encoder.EncodeZero {
|
||||
subPath = f.opt.Enc.ToStandardPath(subPath)
|
||||
}
|
||||
remote := path.Join(dir, subPath)
|
||||
remote := path.Join(dir, u.Path[len(baseURL.Path):])
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
@@ -886,21 +800,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
if notEmpty {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
} else if f.checkBeforePurge {
|
||||
// We are doing purge as the `check` argument is unset.
|
||||
// The quirk says that we are working with Sharepoint 2016.
|
||||
// This provider returns status 204 even if the purged directory
|
||||
// does not really exist so we perform an extra check here.
|
||||
// Only the existence is checked, all other errors must be
|
||||
// ignored here to make the rclone test suite pass.
|
||||
depth := defaultDepth
|
||||
if f.retryWithZeroDepth {
|
||||
depth = "0"
|
||||
}
|
||||
_, err := f.readMetaDataForPath(ctx, dir, depth)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
|
||||
@@ -38,14 +38,3 @@ func TestIntegration3(t *testing.T) {
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration4(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavNTLM:",
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -537,7 +537,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
|
||||
RootURL: location,
|
||||
Method: "GET",
|
||||
}
|
||||
deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
|
||||
deadline := time.Now().Add(f.ci.Timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
var resp *http.Response
|
||||
var body []byte
|
||||
@@ -568,7 +568,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
|
||||
return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
|
||||
}
|
||||
|
||||
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
|
||||
|
||||
@@ -27,22 +27,17 @@ import (
|
||||
|
||||
var (
|
||||
// Flags
|
||||
debug = flag.Bool("d", false, "Print commands instead of running them.")
|
||||
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
|
||||
copyAs = flag.String("release", "", "Make copies of the releases with this name")
|
||||
gitLog = flag.String("git-log", "", "git log to include as well")
|
||||
include = flag.String("include", "^.*$", "os/arch regexp to include")
|
||||
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
|
||||
cgo = flag.Bool("cgo", false, "Use cgo for the build")
|
||||
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
|
||||
tags = flag.String("tags", "", "Space separated list of build tags")
|
||||
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
|
||||
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
|
||||
extraEnv = flag.String("env", "", "comma separated list of VAR=VALUE env vars to set")
|
||||
macOSSDK = flag.String("macos-sdk", "", "macOS SDK to use")
|
||||
macOSArch = flag.String("macos-arch", "", "macOS arch to use")
|
||||
extraCgoCFlags = flag.String("cgo-cflags", "", "extra CGO_CFLAGS")
|
||||
extraCgoLdFlags = flag.String("cgo-ldflags", "", "extra CGO_LDFLAGS")
|
||||
debug = flag.Bool("d", false, "Print commands instead of running them.")
|
||||
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
|
||||
copyAs = flag.String("release", "", "Make copies of the releases with this name")
|
||||
gitLog = flag.String("git-log", "", "git log to include as well")
|
||||
include = flag.String("include", "^.*$", "os/arch regexp to include")
|
||||
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
|
||||
cgo = flag.Bool("cgo", false, "Use cgo for the build")
|
||||
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
|
||||
tags = flag.String("tags", "", "Space separated list of build tags")
|
||||
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
|
||||
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
|
||||
)
|
||||
|
||||
// GOOS/GOARCH pairs we build for
|
||||
@@ -52,7 +47,6 @@ var osarches = []string{
|
||||
"windows/386",
|
||||
"windows/amd64",
|
||||
"darwin/amd64",
|
||||
"darwin/arm64",
|
||||
"linux/386",
|
||||
"linux/amd64",
|
||||
"linux/arm",
|
||||
@@ -78,7 +72,7 @@ var osarches = []string{
|
||||
|
||||
// Special environment flags for a given arch
|
||||
var archFlags = map[string][]string{
|
||||
"386": {"GO386=softfloat"},
|
||||
"386": {"GO386=387"},
|
||||
"mips": {"GOMIPS=softfloat"},
|
||||
"mipsle": {"GOMIPS=softfloat"},
|
||||
"arm-v7": {"GOARM=7"},
|
||||
@@ -285,15 +279,6 @@ func stripVersion(goarch string) string {
|
||||
return goarch[:i]
|
||||
}
|
||||
|
||||
// run the command returning trimmed output
|
||||
func runOut(command ...string) string {
|
||||
out, err := exec.Command(command[0], command[1:]...).Output()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to run %q: %v", command, err)
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// build the binary in dir returning success or failure
|
||||
func compileArch(version, goos, goarch, dir string) bool {
|
||||
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
|
||||
@@ -329,35 +314,6 @@ func compileArch(version, goos, goarch, dir string) bool {
|
||||
"GOOS=" + goos,
|
||||
"GOARCH=" + stripVersion(goarch),
|
||||
}
|
||||
if *extraEnv != "" {
|
||||
env = append(env, strings.Split(*extraEnv, ",")...)
|
||||
}
|
||||
var (
|
||||
cgoCFlags []string
|
||||
cgoLdFlags []string
|
||||
)
|
||||
if *macOSSDK != "" {
|
||||
flag := "-isysroot " + runOut("xcrun", "--sdk", *macOSSDK, "--show-sdk-path")
|
||||
cgoCFlags = append(cgoCFlags, flag)
|
||||
cgoLdFlags = append(cgoLdFlags, flag)
|
||||
}
|
||||
if *macOSArch != "" {
|
||||
flag := "-arch " + *macOSArch
|
||||
cgoCFlags = append(cgoCFlags, flag)
|
||||
cgoLdFlags = append(cgoLdFlags, flag)
|
||||
}
|
||||
if *extraCgoCFlags != "" {
|
||||
cgoCFlags = append(cgoCFlags, *extraCgoCFlags)
|
||||
}
|
||||
if *extraCgoLdFlags != "" {
|
||||
cgoLdFlags = append(cgoLdFlags, *extraCgoLdFlags)
|
||||
}
|
||||
if len(cgoCFlags) > 0 {
|
||||
env = append(env, "CGO_CFLAGS="+strings.Join(cgoCFlags, " "))
|
||||
}
|
||||
if len(cgoLdFlags) > 0 {
|
||||
env = append(env, "CGO_LDFLAGS="+strings.Join(cgoLdFlags, " "))
|
||||
}
|
||||
if !*cgo {
|
||||
env = append(env, "CGO_ENABLED=0")
|
||||
} else {
|
||||
|
||||
bin/make_test_files.go (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
// +build ignore
|
||||
|
||||
// Build a directory structure with the required number of files in
|
||||
//
|
||||
// Run with go run make_test_files.go [flag] <directory>
|
||||
package main
|
||||
|
||||
import (
|
||||
cryptrand "crypto/rand"
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
|
||||
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
|
||||
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
|
||||
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
|
||||
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
|
||||
minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create")
|
||||
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum size of files to create")
|
||||
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
// randomString create a random string for test purposes
|
||||
func randomString(n int) string {
|
||||
const (
|
||||
vowel = "aeiou"
|
||||
consonant = "bcdfghjklmnpqrstvwxyz"
|
||||
digit = "0123456789"
|
||||
)
|
||||
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
|
||||
out := make([]byte, n)
|
||||
p := 0
|
||||
for i := range out {
|
||||
source := pattern[p]
|
||||
p = (p + 1) % len(pattern)
|
||||
out[i] = source[rand.Intn(len(source))]
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// fileName creates a unique random file or directory name
|
||||
func fileName() (name string) {
|
||||
for {
|
||||
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
|
||||
name = randomString(length)
|
||||
if _, found := fileNames[name]; !found {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileNames[name] = struct{}{}
|
||||
return name
|
||||
}
|
||||
|
||||
// dir is a directory in the directory hierarchy being built up
|
||||
type dir struct {
|
||||
name string
|
||||
depth int
|
||||
children []*dir
|
||||
parent *dir
|
||||
}
|
||||
|
||||
// Create a random directory hierarchy under d
|
||||
func (d *dir) createDirectories() {
|
||||
for totalDirectories < directoriesToCreate {
|
||||
newDir := &dir{
|
||||
name: fileName(),
|
||||
depth: d.depth + 1,
|
||||
parent: d,
|
||||
}
|
||||
d.children = append(d.children, newDir)
|
||||
totalDirectories++
|
||||
switch rand.Intn(4) {
|
||||
case 0:
|
||||
if d.depth < *maxDepth {
|
||||
newDir.createDirectories()
|
||||
}
|
||||
case 1:
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// list the directory hierarchy
|
||||
func (d *dir) list(path string, output []string) []string {
|
||||
dirPath := filepath.Join(path, d.name)
|
||||
output = append(output, dirPath)
|
||||
for _, subDir := range d.children {
|
||||
output = subDir.list(dirPath, output)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// writeFile writes a random file at dir/name
|
||||
func writeFile(dir, name string) {
|
||||
err := os.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directory %q: %v", dir, err)
|
||||
}
|
||||
path := filepath.Join(dir, name)
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open file %q: %v", path, err)
|
||||
}
|
||||
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
|
||||
_, err = io.CopyN(fd, cryptrand.Reader, size)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close file %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Require 1 directory argument")
|
||||
}
|
||||
outputDirectory := args[0]
|
||||
log.Printf("Output dir %q", outputDirectory)
|
||||
|
||||
directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
|
||||
log.Printf("directoriesToCreate %v", directoriesToCreate)
|
||||
root := &dir{name: outputDirectory, depth: 1}
|
||||
for totalDirectories < directoriesToCreate {
|
||||
root.createDirectories()
|
||||
}
|
||||
dirs := root.list("", []string{})
|
||||
for i := 0; i < *numberOfFiles; i++ {
|
||||
dir := dirs[rand.Intn(len(dirs))]
|
||||
writeFile(dir, fileName())
|
||||
}
|
||||
}
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
_ "github.com/rclone/rclone/cmd/genautocomplete"
|
||||
_ "github.com/rclone/rclone/cmd/gendocs"
|
||||
_ "github.com/rclone/rclone/cmd/hashsum"
|
||||
_ "github.com/rclone/rclone/cmd/info"
|
||||
_ "github.com/rclone/rclone/cmd/link"
|
||||
_ "github.com/rclone/rclone/cmd/listremotes"
|
||||
_ "github.com/rclone/rclone/cmd/ls"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
_ "github.com/rclone/rclone/cmd/lsjson"
|
||||
_ "github.com/rclone/rclone/cmd/lsl"
|
||||
_ "github.com/rclone/rclone/cmd/md5sum"
|
||||
_ "github.com/rclone/rclone/cmd/memtest"
|
||||
_ "github.com/rclone/rclone/cmd/mkdir"
|
||||
_ "github.com/rclone/rclone/cmd/mount"
|
||||
_ "github.com/rclone/rclone/cmd/mount2"
|
||||
@@ -52,11 +54,6 @@ import (
|
||||
_ "github.com/rclone/rclone/cmd/sha1sum"
|
||||
_ "github.com/rclone/rclone/cmd/size"
|
||||
_ "github.com/rclone/rclone/cmd/sync"
|
||||
_ "github.com/rclone/rclone/cmd/test"
|
||||
_ "github.com/rclone/rclone/cmd/test/histogram"
|
||||
_ "github.com/rclone/rclone/cmd/test/info"
|
||||
_ "github.com/rclone/rclone/cmd/test/makefiles"
|
||||
_ "github.com/rclone/rclone/cmd/test/memory"
|
||||
_ "github.com/rclone/rclone/cmd/touch"
|
||||
_ "github.com/rclone/rclone/cmd/tree"
|
||||
_ "github.com/rclone/rclone/cmd/version"
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
@@ -37,19 +36,6 @@ func init() {
|
||||
mountlib.AddRc("cmount", mount)
|
||||
}
|
||||
|
||||
// Find the option string in the current options
|
||||
func findOption(name string, options []string) (found bool) {
|
||||
for _, option := range options {
|
||||
if option == "-o" {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(option, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// mountOptions configures the options from the command line flags
|
||||
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
|
||||
// Options
|
||||
@@ -119,13 +105,6 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
|
||||
for _, option := range opt.ExtraFlags {
|
||||
options = append(options, option)
|
||||
}
|
||||
if runtime.GOOS == "darwin" {
|
||||
if !findOption("modules=iconv", options) {
|
||||
iconv := "modules=iconv,from_code=UTF-8,to_code=UTF-8-MAC"
|
||||
options = append(options, "-o", iconv)
|
||||
fs.Debugf(nil, "Adding \"-o %s\" for macOS", iconv)
|
||||
}
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
|
||||
@@ -103,9 +103,8 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
|
||||
} else if !os.IsNotExist(err) {
|
||||
return "", errors.Wrap(err, "failed to retrieve mountpoint path information")
|
||||
}
|
||||
if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:"
|
||||
mountpath = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator
|
||||
}
|
||||
//if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:"
|
||||
// mountpoint = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator
|
||||
if !isDrive(mountpath) {
|
||||
// Assuming directory path, since it is not a pure drive letter string such as "X:".
|
||||
// Drive letter string can be used as is, since we have already checked it does not exist,
|
||||
@@ -114,12 +113,14 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
|
||||
fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
|
||||
opt.NetworkMode = false
|
||||
}
|
||||
var err error
|
||||
if mountpath, err = filepath.Abs(mountpath); err != nil { // Ensures parent is found but also more informative log messages
|
||||
return "", errors.Wrap(err, "mountpoint path is not valid: "+mountpath)
|
||||
}
|
||||
parent := filepath.Join(mountpath, "..")
|
||||
if _, err = os.Stat(parent); err != nil {
|
||||
if parent == "" || parent == "." {
|
||||
return "", errors.New("mountpoint directory is not valid: " + parent)
|
||||
}
|
||||
if os.IsPathSeparator(parent[len(parent)-1]) { // Ends in a separator only if it is the root directory
|
||||
return "", errors.New("mountpoint directory is at root: " + parent)
|
||||
}
|
||||
if _, err := os.Stat(parent); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", errors.New("parent of mountpoint directory does not exist: " + parent)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package info
|
||||
|
||||
// FIXME once translations are implemented will need a no-escape
|
||||
// option for Put so we can make these tests work again
|
||||
// option for Put so we can make these tests work agaig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
@@ -21,8 +20,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/test"
|
||||
"github.com/rclone/rclone/cmd/test/info/internal"
|
||||
"github.com/rclone/rclone/cmd/info/internal"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -37,7 +35,6 @@ var (
|
||||
checkControl bool
|
||||
checkLength bool
|
||||
checkStreaming bool
|
||||
all bool
|
||||
uploadWait time.Duration
|
||||
positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
|
||||
positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
|
||||
@@ -45,15 +42,14 @@ var (
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
|
||||
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization.")
|
||||
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters.")
|
||||
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
|
||||
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
|
||||
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
|
||||
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length.")
|
||||
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size.")
|
||||
flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests.")
|
||||
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
|
||||
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -63,20 +59,10 @@ var commandDefinition = &cobra.Command{
|
||||
to write to the paths passed in and how long they can be. It can take some
|
||||
time. It will write test files into the remote:path passed in. It outputs
|
||||
a bit of go code for each one.
|
||||
|
||||
**NB** this can create undeletable files and other hazards - use with care
|
||||
`,
|
||||
Hidden: true,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1e6, command, args)
|
||||
if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !all {
|
||||
log.Fatalf("no tests selected - select a test or use -all")
|
||||
}
|
||||
if all {
|
||||
checkNormalization = true
|
||||
checkControl = true
|
||||
checkLength = true
|
||||
checkStreaming = true
|
||||
}
|
||||
for i := range args {
|
||||
f := cmd.NewFsDir(args[i : i+1])
|
||||
cmd.Run(false, false, command, func() error {
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/rclone/rclone/cmd/test/info/internal"
|
||||
"github.com/rclone/rclone/cmd/info/internal"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -1,4 +1,4 @@
|
||||
package memory
|
||||
package memtest
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,19 +6,19 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/test"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "memory remote:path",
|
||||
Short: `Load all the objects at remote:path into memory and report memory stats.`,
|
||||
Use: "memtest remote:path",
|
||||
Short: `Load all the objects at remote:path and report memory stats.`,
|
||||
Hidden: true,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// FUSE main Fs
|
||||
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package mount implements a FUSE mounting system for rclone remotes.
|
||||
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux freebsd
|
||||
// +build linux,go1.13 freebsd,go1.13
|
||||
|
||||
package mount
|
||||
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
// Build for mount for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// Invert the build constraint: linux freebsd
|
||||
// Invert the build constraint: linux,go1.13 freebsd,go1.13
|
||||
//
|
||||
// !((linux&&go1.13) || (freebsd&&go1.13))
|
||||
// == !(linux&&go1.13) && !(freebsd&&go1.13))
|
||||
// == (!linux || !go1.13) && (!freebsd || !go1.13))
|
||||
|
||||
// +build !linux
|
||||
// +build !freebsd
|
||||
// +build !linux !go1.13
|
||||
// +build !freebsd !go1.13
|
||||
|
||||
package mount
|
||||
|
||||
@@ -179,15 +179,15 @@ is an **empty** **existing** directory:

On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter |X:|, to path |C:\path\parent\mount|
(where parent directory or drive must exist, and mount must **not** exist,
to specific drive letter |X:|, to path |C:\path\to\nonexistent\directory|
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:

    rclone @ remote:path/to/files *
    rclone @ remote:path/to/files X:
    rclone @ remote:path/to/files C:\path\parent\mount
    rclone @ remote:path/to/files C:\path\to\nonexistent\directory
    rclone @ remote:path/to/files \\cloud\remote

When the program ends while in foreground mode, either via Ctrl+C or receiving
@@ -241,14 +241,14 @@ and experience unexpected program errors, freezes or other issues, consider moun
as a network drive instead.

When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
or to a path - which must be **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:

    rclone @ remote:path/to/files *
    rclone @ remote:path/to/files X:
    rclone @ remote:path/to/files C:\path\parent\mount
    rclone @ remote:path/to/files C:\path\to\nonexistent\directory
    rclone @ remote:path/to/files X:

Option |--volname| can be used to set a custom volume name for the mounted
@@ -321,24 +321,10 @@ Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
but not "write extended attributes" (WinFsp does not support extended attributes,
see [this](https://github.com/billziss-gh/winfsp/wiki/NTFS-Compatibility)).
Windows will then show this as basic permission "Special" instead of "Write",
because "Write" includes the "write extended attributes" permission.

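As an illustrative sketch only (combining the flags and descriptor quoted above; the exact command is not part of this diff, and the FileSecurity option needs a WinFsp release that supports it), an owner-only mount might look like:

    rclone mount remote:path/to/files X: --file-perms 0600 --dir-perms 0700 -o FileSecurity="D:P(A;;FA;;;OW)"
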
#### Windows caveats

@@ -16,7 +16,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/fs/rc/jobs"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
@@ -165,7 +164,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
|
||||
if call == nil {
|
||||
return nil, errors.Errorf("method %q not found", path)
|
||||
}
|
||||
_, out, err := jobs.NewJob(ctx, call.Fn, in)
|
||||
out, err = call.Fn(context.Background(), in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "loopback call failed")
|
||||
}
|
||||
@@ -185,10 +184,11 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
|
||||
return nil, errors.Wrap(err, "failed to encode JSON")
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(data))
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make request")
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if authUser != "" || authPass != "" {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package ftp implements an FTP server for rclone
|
||||
|
||||
//+build !plan9
|
||||
//+build !plan9,go1.13
|
||||
|
||||
package ftp
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
//
|
||||
// We skip tests on platforms with troublesome character mappings
|
||||
|
||||
//+build !windows,!darwin,!plan9
|
||||
//+build !windows,!darwin,!plan9,go1.13
|
||||
|
||||
package ftp
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build plan9
|
||||
// +build plan9 !go1.13
|
||||
|
||||
package ftp
|
||||
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/test"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "histogram [remote:path]",
|
||||
Short: `Makes a histogram of file name characters.`,
|
||||
Long: `This command outputs JSON which shows the histogram of characters used
|
||||
in filenames in the remote:path specified.
|
||||
|
||||
The data doesn't contain any identifying information but is useful for
|
||||
the rclone developers when developing filename compression.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsDir(args)
|
||||
ctx := context.Background()
|
||||
ci := fs.GetConfig(ctx)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
var hist [256]int64
|
||||
err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
|
||||
for _, entry := range entries {
|
||||
base := path.Base(entry.Remote())
|
||||
for i := range base {
|
||||
hist[base[i]]++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
// enc.SetIndent("", "\t")
|
||||
err = enc.Encode(&hist)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
// Package makefiles builds a directory structure with the required
|
||||
// number of files in of the required size.
|
||||
package makefiles
|
||||
|
||||
import (
|
||||
cryptrand "crypto/rand"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/test"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
numberOfFiles = 1000
|
||||
averageFilesPerDirectory = 10
|
||||
maxDepth = 10
|
||||
minFileSize = fs.SizeSuffix(0)
|
||||
maxFileSize = fs.SizeSuffix(100)
|
||||
minFileNameLength = 4
|
||||
maxFileNameLength = 12
|
||||
|
||||
// Globals
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.IntVarP(cmdFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
|
||||
flags.IntVarP(cmdFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
|
||||
flags.IntVarP(cmdFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
|
||||
flags.FVarP(cmdFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
|
||||
flags.FVarP(cmdFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
|
||||
flags.IntVarP(cmdFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
|
||||
flags.IntVarP(cmdFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "makefiles <dir>",
|
||||
Short: `Make a random file hierarchy in <dir>`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
outputDirectory := args[0]
|
||||
directoriesToCreate = numberOfFiles / averageFilesPerDirectory
|
||||
averageSize := (minFileSize + maxFileSize) / 2
|
||||
log.Printf("Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)
|
||||
root := &dir{name: outputDirectory, depth: 1}
|
||||
for totalDirectories < directoriesToCreate {
|
||||
root.createDirectories()
|
||||
}
|
||||
dirs := root.list("", []string{})
|
||||
for i := 0; i < numberOfFiles; i++ {
|
||||
dir := dirs[rand.Intn(len(dirs))]
|
||||
writeFile(dir, fileName())
|
||||
}
|
||||
log.Printf("Done.")
|
||||
},
|
||||
}
|
||||
|
||||
// fileName creates a unique random file or directory name
|
||||
func fileName() (name string) {
|
||||
for {
|
||||
length := rand.Intn(maxFileNameLength-minFileNameLength) + minFileNameLength
|
||||
name = random.String(length)
|
||||
if _, found := fileNames[name]; !found {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileNames[name] = struct{}{}
|
||||
return name
|
||||
}
|
||||
|
||||
// dir is a directory in the directory hierarchy being built up
|
||||
type dir struct {
|
||||
name string
|
||||
depth int
|
||||
children []*dir
|
||||
parent *dir
|
||||
}
|
||||
|
||||
// Create a random directory hierarchy under d
|
||||
func (d *dir) createDirectories() {
|
||||
for totalDirectories < directoriesToCreate {
|
||||
newDir := &dir{
|
||||
name: fileName(),
|
||||
depth: d.depth + 1,
|
||||
parent: d,
|
||||
}
|
||||
d.children = append(d.children, newDir)
|
||||
totalDirectories++
|
||||
switch rand.Intn(4) {
|
||||
case 0:
|
||||
if d.depth < maxDepth {
|
||||
newDir.createDirectories()
|
||||
}
|
||||
case 1:
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// list the directory hierarchy
|
||||
func (d *dir) list(path string, output []string) []string {
|
||||
dirPath := filepath.Join(path, d.name)
|
||||
output = append(output, dirPath)
|
||||
for _, subDir := range d.children {
|
||||
output = subDir.list(dirPath, output)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// writeFile writes a random file at dir/name
|
||||
func writeFile(dir, name string) {
|
||||
err := os.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directory %q: %v", dir, err)
|
||||
}
|
||||
path := filepath.Join(dir, name)
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open file %q: %v", path, err)
|
||||
}
|
||||
size := rand.Int63n(int64(maxFileSize-minFileSize)) + int64(minFileSize)
|
||||
_, err = io.CopyN(fd, cryptrand.Reader, size)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close file %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(Command)
|
||||
}
|
||||
|
||||
// Command definition for cobra
|
||||
var Command = &cobra.Command{
|
||||
Use: "test <subcommand>",
|
||||
Short: `Run a test command`,
|
||||
Long: `Rclone test is used to run test commands.
|
||||
|
||||
Select which test comand you want with the subcommand, eg
|
||||
|
||||
rclone test memory remote:
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
|
||||
**NB** Be careful running these commands, they may do strange things
|
||||
so reading their documentation first is recommended.
|
||||
`,
|
||||
}
|
||||
@@ -456,20 +456,3 @@ put them back in again.` >}}
* Nicolas Rueff <nicolas@rueff.fr>
* Pau Rodriguez-Estivill <prodrigestivill@gmail.com>
* Bob Pusateri <BobPusateri@users.noreply.github.com>
* Alex JOST <25005220+dimejo@users.noreply.github.com>
* Alexey Tabakman <samosad.ru@gmail.com>
* David Sze <sze.david@gmail.com>
* cynthia kwok <cynthia.m.kwok@gmail.com>
* Ankur Gupta <agupta@egnyte.com>
* Miron Veryanskiy <MironVeryanskiy@gmail.com>
* K265 <k.265@qq.com>
* Vesnyx <Vesnyx@users.noreply.github.com>
* Dmitry Chepurovskiy <me@dm3ch.net>
* Rauno Ots <rauno.ots@cgi.com>
* Georg Neugschwandtner <georg.neugschwandtner@gmx.net>
* pvalls <polvallsrue@gmail.com>
* Robert Thomas <31854736+wolveix@users.noreply.github.com>
* Romeo Kienzler <romeo.kienzler@gmail.com>
* tYYGH <tYYGH@users.noreply.github.com>
* georne <77802995+georne@users.noreply.github.com>
* Maxwell Calman <mcalman@MacBook-Pro.local>

@@ -5,6 +5,43 @@ description: "Rclone Changelog"

# Changelog

## v1.54.1 - 2021-03-08

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1)

* Bug Fixes
    * accounting: Fix --bwlimit when up or down is off (Nick Craig-Wood)
    * docs
        * Fix nesting of brackets and backticks in ftp docs (edwardxml)
        * Fix broken link in sftp page (edwardxml)
        * Fix typo in crypt.md (Romeo Kienzler)
        * Changelog: Correct link to digitalis.io (Alex JOST)
        * Replace #file-caching with #vfs-file-caching (Miron Veryanskiy)
        * Convert bogus example link to code (edwardxml)
        * Remove dead link from rc.md (edwardxml)
    * rc: Sync,copy,move: document createEmptySrcDirs parameter (Nick Craig-Wood)
    * lsjson: Fix unterminated JSON in the presence of errors (Nick Craig-Wood)
* Mount
    * Fix mount dropping on macOS by setting --daemon-timeout 10m (Nick Craig-Wood)
* VFS
    * Document simultaneous usage with the same cache shouldn't be used (Nick Craig-Wood)
* B2
    * Automatically raise upload cutoff to avoid spurious error (Nick Craig-Wood)
    * Fix failed to create file system with application key limited to a prefix (Nick Craig-Wood)
* Drive
    * Refer to Shared Drives instead of Team Drives (Nick Craig-Wood)
* Dropbox
    * Add scopes to oauth request and optionally "members.read" (Nick Craig-Wood)
* S3
    * Fix failed to create file system with folder level permissions policy (Nick Craig-Wood)
    * Fix Wasabi HEAD requests returning stale data by using only 1 transport (Nick Craig-Wood)
    * Fix shared_credentials_file auth (Dmitry Chepurovskiy)
    * Add --s3-no-head to reducing costs docs (Nick Craig-Wood)
* Union
    * Fix mkdir at root with remote:/ (Nick Craig-Wood)
* Zoho
    * Fix custom client id's (buengese)

## v1.54.0 - 2021-02-02

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.54.0)

@@ -151,9 +151,6 @@ Note that `list` assembles composite directory entries only when chunk names
match the configured format and treats non-conforming file names as normal
non-chunked files.

When using `norename` transactions, chunk names will additionally have a unique
file version suffix. For example, `BIG_FILE_NAME.rclone_chunk.001_bp562k`.


### Metadata

@@ -173,7 +170,6 @@ for composite files. Meta objects carry the following fields:
- `nchunks` - number of data chunks in file
- `md5` - MD5 hashsum of composite file (if present)
- `sha1` - SHA1 hashsum (if present)
- `txn` - identifies current version of the file

There is no field for composite file name as it's simply equal to the name
of meta object on the wrapped remote. Please refer to respective sections
@@ -246,8 +242,8 @@ use modification time of the first data chunk.

### Migrations

The idiomatic way to migrate to a different chunk size, hash type, transaction
style or chunk naming scheme is to:
The idiomatic way to migrate to a different chunk size, hash type or
chunk naming scheme is to:

- Collect all your chunked files under a directory and have your
  chunker remote point to it.
@@ -307,8 +303,6 @@ Chunker included in rclone releases up to `v1.54` can sometimes fail to
detect metadata produced by recent versions of rclone. We recommend users
to keep rclone up-to-date to avoid data corruption.

Changing `transactions` is dangerous and requires explicit migration.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/chunker/chunker.go then run make backenddocs" >}}
### Standard Options

@@ -378,6 +378,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

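As a rough illustration of that workaround (remote, mountpoints and cache paths here are placeholders, not part of this change), two rclone instances sharing a remote could each be pointed at their own cache:

    rclone mount remote:path /mnt/one --vfs-cache-mode writes --cache-dir /tmp/rclone-cache-one
    rclone mount remote:path /mnt/two --vfs-cache-mode writes --cache-dir /tmp/rclone-cache-two
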
### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -134,6 +134,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -133,6 +133,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -205,6 +205,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -144,6 +144,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -213,6 +213,13 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -363,7 +363,7 @@ This option controls the bandwidth limit. For example

    --bwlimit 10M

would mean limit the upload and download bandwidth to 10 MByte/s.
would mean limit the upload and download bandwidth to 10 Mbyte/s.
**NB** this is **bytes** per second not **bits** per second. To use a
single limit, specify the desired bandwidth in kBytes/s, or use a
suffix b|k|M|G. The default is `0` which means to not limit bandwidth.
@@ -373,7 +373,7 @@ The upload and download bandwidth can be specified seperately, as

    --bwlimit 10M:100k

would mean limit the upload bandwidth to 10 MByte/s and the download
would mean limit the upload bandwidth to 10 Mbyte/s and the download
bandwidth to 100 kByte/s. Either limit can be "off" meaning no limit, so
to just limit the upload bandwidth you would use

@@ -402,9 +402,9 @@ working hours could be:
`--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`

In this example, the transfer bandwidth will be set to 512kBytes/sec
at 8am every day. At noon, it will rise to 10MByte/s, and drop back
at 8am every day. At noon, it will rise to 10Mbytes/s, and drop back
to 512kBytes/sec at 1pm. At 6pm, the bandwidth limit will be set to
30MByte/s, and at 11pm it will be completely disabled (full speed).
30MBytes/s, and at 11pm it will be completely disabled (full speed).
Anything between 11pm and 8am will remain unlimited.

An example of timetable with `WEEKDAY` could be:
@@ -412,8 +412,8 @@ An example of timetable with `WEEKDAY` could be:
`--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`

It means that, the transfer bandwidth will be set to 512kBytes/sec on
Monday. It will rise to 10MByte/s before the end of Friday. At 10:00
on Saturday it will be set to 1MByte/s. From 20:00 on Sunday it will
Monday. It will rise to 10Mbytes/s before the end of Friday. At 10:00
on Saturday it will be set to 1Mbyte/s. From 20:00 on Sunday it will
be unlimited.

Timeslots without `WEEKDAY` are extended to the whole week. So this
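The timetable syntax above maps onto the `BwTimetable` type in the `fs` package, whose `Set` and `LimitAt` methods appear later in this diff. A minimal sketch, assuming you are writing Go against the rclone module, of checking which limit would be in force at a given moment:

```
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
)

func main() {
	var tt fs.BwTimetable
	// Same timetable as in the docs example above.
	if err := tt.Set("08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"); err != nil {
		panic(err)
	}
	// LimitAt picks the slot in force at the given time; Bandwidth holds
	// separate upload (Tx) and download (Rx) byte-per-second limits
	// (a value below zero means no limit).
	slot := tt.LimitAt(time.Now())
	fmt.Printf("upload limit: %d B/s, download limit: %d B/s\n",
		slot.Bandwidth.Tx, slot.Bandwidth.Rx)
}
```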
@@ -600,21 +600,6 @@ This flag can be useful for debugging and in exceptional circumstances
(e.g. Google Drive limiting the total volume of Server Side Copies to
100GB/day).

### --dscp VALUE ###

Specify a DSCP value or name to use in connections. This could help QoS
system to identify traffic class. BE, EF, DF, LE, CSx and AFxx are allowed.

See the description of [differentiated services](https://en.wikipedia.org/wiki/Differentiated_services) to get an idea of
this field. Setting this to 1 (LE) to identify the flow to SCAVENGER class
can avoid occupying too much bandwidth in a network with DiffServ support ([RFC 8622](https://tools.ietf.org/html/rfc8622)).

For example, if you configured QoS on router to handle LE properly. Running:
```
rclone copy --dscp LE from:/from to:/to
```
would make the priority lower than usual internet flows.

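Internally the flag is turned into the IP traffic-class byte: the 6-bit DSCP value is shifted left by two, since the bottom two bits of that byte carry ECN, which is what the `parseDSCP` and `SetFlags` code further down this diff does. A small self-contained sketch of that mapping (the name table here is abridged from the full one shown later):

```
package main

import "fmt"

// dscpByName maps a few of the allowed names to their 6-bit DSCP values
// (abridged from the parseDSCP table shown later in this diff).
var dscpByName = map[string]uint8{
	"BE": 0x00, "DF": 0x00, "CS0": 0x00,
	"CS1":  0x08,
	"AF21": 0x12,
	"EF":   0x2E,
	"LE":   0x01, // scavenger class, RFC 8622
}

// trafficClass turns a DSCP value into the value written into the IP
// TOS/traffic-class byte: DSCP occupies the top six bits, ECN the bottom two.
func trafficClass(dscp uint8) uint8 {
	return dscp << 2
}

func main() {
	for _, name := range []string{"LE", "EF", "AF21"} {
		fmt.Printf("--dscp %s -> traffic class 0x%02X\n", name, trafficClass(dscpByName[name]))
	}
}
```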
### -n, --dry-run ###
|
||||
|
||||
Do a trial run with no permanent changes. Use this to see what rclone
|
||||
|
||||
@@ -13,7 +13,7 @@ Rclone Download {{< version >}}
|
||||
| Intel/AMD - 32 Bit | {{< download windows 386 >}} | - | {{< download linux 386 >}} | {{< download linux 386 deb >}} | {{< download linux 386 rpm >}} | {{< download freebsd 386 >}} | {{< download netbsd 386 >}} | {{< download openbsd 386 >}} | {{< download plan9 386 >}} | - |
|
||||
| ARMv6 - 32 Bit | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
|
||||
| ARMv7 - 32 Bit | - | - | {{< download linux arm-v7 >}} | {{< download linux arm-v7 deb >}} | {{< download linux arm-v7 rpm >}} | {{< download freebsd arm-v7 >}} | {{< download netbsd arm-v7 >}} | - | - | - |
|
||||
| ARM - 64 Bit | - | {{< download osx arm64 >}} | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| ARM - 64 Bit | - | - | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Little Endian | - | - | {{< download linux mipsle >}} | {{< download linux mipsle deb >}} | {{< download linux mipsle rpm >}} | - | - | - | - | - |
|
||||
|
||||
@@ -82,7 +82,7 @@ script) from a URL which doesn't change then you can use these links.
|
||||
| Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
|
||||
| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
|
||||
| ARMv7 - 32 Bit | - | - | {{< cdownload linux arm-v7 >}} | {{< cdownload linux arm-v7 deb >}} | {{< cdownload linux arm-v7 rpm >}} | {{< cdownload freebsd arm-v7 >}} | {{< cdownload netbsd arm-v7 >}} | - | - | - |
|
||||
| ARM - 64 Bit | - | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| ARM - 64 Bit | - | - | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Big Endian | - | - | {{< cdownload linux mips >}} | {{< cdownload linux mips deb >}} | {{< cdownload linux mips rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Little Endian | - | - | {{< cdownload linux mipsle >}} | {{< cdownload linux mipsle deb >}} | {{< cdownload linux mipsle rpm >}} | - | - | - | - | - |
|
||||
|
||||
|
||||
@@ -197,6 +197,21 @@ memory. It can be set smaller if you are tight on memory.
|
||||
|
||||
Impersonate this user when using a business account.
|
||||
|
||||
Note that if you want to use impersonate, you should make sure this
|
||||
flag is set when running "rclone config" as this will cause rclone to
|
||||
request the "members.read" scope which it won't normally. This is
|
||||
needed to lookup a members email address into the internal ID that
|
||||
dropbox uses in the API.
|
||||
|
||||
Using the "members.read" scope will require a Dropbox Team Admin
|
||||
to approve during the OAuth flow.
|
||||
|
||||
You will have to use your own App (setting your own client_id and
|
||||
client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once
|
||||
v1.55 or later is in use everywhere.
|
||||
|
||||
|
||||
- Config: impersonate
|
||||
- Env Var: RCLONE_DROPBOX_IMPERSONATE
|
||||
- Type: string
|
||||
|
||||
@@ -78,24 +78,6 @@ separator or the beginning of the path/file.
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
|
||||
The top level of the remote may not be the top level of the drive.
|
||||
|
||||
E.g. for a Microsoft Windows local directory structure
|
||||
|
||||
F:
|
||||
├── bkp
|
||||
├── data
|
||||
│ ├── excl
|
||||
│ │ ├── 123.jpg
|
||||
│ │ └── 456.jpg
|
||||
│ ├── incl
|
||||
│ │ └── document.pdf
|
||||
|
||||
To copy the contents of folder `data` into folder `bkp` excluding the contents of subfolder
|
||||
`excl`the following command treats `F:\data` and `F:\bkp` as top level for filtering.
|
||||
|
||||
`rclone copy F:\data\ F:\bkp\ --exclude=/excl/**`
|
||||
|
||||
**Important** Use `/` in path/file name patterns and not `\` even if
|
||||
running on Microsoft Windows.
|
||||
|
||||
@@ -113,7 +95,7 @@ With `--ignore-case`
|
||||
|
||||
## How filter rules are applied to files
|
||||
|
||||
Rclone path/file name filters are made up of one or more of the following flags:
|
||||
Rclone path / file name filters are made up of one or more of the following flags:
|
||||
|
||||
* `--include`
|
||||
* `--include-from`
|
||||
@@ -139,7 +121,7 @@ To mix up the order of processing includes and excludes use `--filter...`
|
||||
flags.
|
||||
|
||||
Within `--include-from`, `--exclude-from` and `--filter-from` flags
|
||||
rules are processed from top to bottom of the referenced file.
|
||||
rules are processed from top to bottom of the referenced file..
|
||||
|
||||
If there is an `--include` or `--include-from` flag specified, rclone
|
||||
implies a `- **` rule which it adds to the bottom of the internal rule
|
||||
@@ -171,7 +153,7 @@ classes. [Go regular expression reference](https://golang.org/pkg/regexp/syntax/
|
||||
|
||||
### How filter rules are applied to directories
|
||||
|
||||
Rclone commands are applied to path/file names not
|
||||
Rclone commands filter, and are applied to, path/file names not
|
||||
directories. The entire contents of a directory can be matched
|
||||
to a filter by the pattern `directory/*` or recursively by
|
||||
`directory/**`.
|
||||
@@ -185,15 +167,15 @@ recurse into subdirectories. This potentially optimises access to a remote
|
||||
by avoiding listing unnecessary directories. Whether optimisation is
|
||||
desirable depends on the specific filter rules and source remote content.
|
||||
|
||||
Directory recursion optimisation occurs if either:
|
||||
Optimisation occurs if either:
|
||||
|
||||
* A source remote does not support the rclone `ListR` primitive. local,
|
||||
sftp, Microsoft OneDrive and WebDav do not support `ListR`. Google
|
||||
* A source remote does not support the rclone `ListR` primitive. `local`,
|
||||
`sftp`, `Microsoft OneDrive` and `WebDav` do not support `ListR`. Google
|
||||
Drive and most bucket type storage do. [Full list](https://rclone.org/overview/#optional-features)
|
||||
|
||||
* On other remotes (those that support `ListR`), if the rclone command is not naturally recursive, and
|
||||
* On other remotes, if the rclone command is not naturally recursive,
|
||||
provided it is not run with the `--fast-list` flag. `ls`, `lsf -R` and
|
||||
`size` are naturally recursive but `sync`, `copy` and `move` are not.
|
||||
`size` are recursive but `sync`, `copy` and `move` are not.
|
||||
|
||||
* Whenever the `--disable ListR` flag is applied to an rclone command.
|
||||
|
||||
@@ -215,7 +197,7 @@ to be specified.
|
||||
|
||||
E.g. `rclone ls remote: --include /directory/` will not match any
|
||||
files. Because it is an `--include` option the `--exclude **` rule
|
||||
is implied, and the `/directory/` pattern serves only to optimise
|
||||
is implied, and the `\directory\` pattern serves only to optimise
|
||||
access to the remote by ignoring everything outside of that directory.
|
||||
|
||||
E.g. `rclone ls remote: --filter-from filter-list.txt` with a file
|
||||
@@ -228,7 +210,7 @@ E.g. `rclone ls remote: --filter-from filter-list.txt` with a file
|
||||
|
||||
All files in directories `dir1` or `dir2` or their subdirectories
|
||||
are completely excluded from the listing. Only files of suffix
|
||||
`pdf` in the root of `remote:` or its subdirectories are listed.
|
||||
`'pdf` in the root of `remote:` or its subdirectories are listed.
|
||||
The `- **` rule prevents listing of any path/files not previously
|
||||
matched by the rules above.
|
||||
|
||||
@@ -259,8 +241,8 @@ directories.
|
||||
|
||||
E.g. on Microsoft Windows `rclone ls remote: --exclude "*\[{JP,KR,HK}\]*"`
|
||||
lists the files in `remote:` with `[JP]` or `[KR]` or `[HK]` in
|
||||
their name. Quotes prevent the shell from interpreting the `\`
|
||||
characters.`\` characters escape the `[` and `]` so an rclone filter
|
||||
their name. The single quotes prevent the shell from interpreting the `\`
|
||||
characters. The `\` characters escape the `[` and `]` so ran clone filter
|
||||
treats them literally rather than as a character-range. The `{` and `}`
|
||||
define an rclone pattern list. For other operating systems single quotes are
|
||||
required ie `rclone ls remote: --exclude '*\[{JP,KR,HK}\]*'`
|
||||
@@ -507,13 +489,13 @@ The three files are transferred as follows:
|
||||
/home/user1/dir/ford → remote:backup/user1/dir/file
|
||||
/home/user2/prefect → remote:backup/user2/stuff
|
||||
|
||||
Alternatively if `/` is chosen as root `files-from.txt` will be:
|
||||
Alternatively if `/` is chosen as root `files-from.txt` would be:
|
||||
|
||||
/home/user1/42
|
||||
/home/user1/dir/ford
|
||||
/home/user2/prefect
|
||||
|
||||
The copy command will be:
|
||||
The copy command would be:
|
||||
|
||||
rclone copy --files-from files-from.txt / remote:backup
|
||||
|
||||
@@ -594,10 +576,10 @@ Default units are seconds or the following abbreviations are valid:
`--max-age` can also be specified as an absolute time in the following
formats:

- RFC3339 - e.g. `2006-01-02T15:04:05Z` or `2006-01-02T15:04:05+07:00`
- ISO8601 Date and time, local timezone - `2006-01-02T15:04:05`
- ISO8601 Date and time, local timezone - `2006-01-02 15:04:05`
- ISO8601 Date - `2006-01-02` (YYYY-MM-DD)
- RFC3339 - e.g. "2006-01-02T15:04:05Z07:00"
- ISO8601 Date and time, local timezone - "2006-01-02T15:04:05"
- ISO8601 Date and time, local timezone - "2006-01-02 15:04:05"
- ISO8601 Date - "2006-01-02" (YYYY-MM-DD)

`--max-age` applies only to files and not to directories.

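These layouts are ordinary Go `time` reference layouts, so the same strings can be checked outside rclone. A small sketch, not rclone's actual parser, that accepts the formats listed above:

```
package main

import (
	"fmt"
	"time"
)

// layouts mirrors the absolute --max-age formats listed above.
var layouts = []string{
	time.RFC3339,          // 2006-01-02T15:04:05Z or with a numeric offset
	"2006-01-02T15:04:05", // ISO8601 date and time, local timezone
	"2006-01-02 15:04:05", // ISO8601 date and time with a space, local timezone
	"2006-01-02",          // ISO8601 date only
}

func parseMaxAge(s string) (time.Time, error) {
	var lastErr error
	for _, layout := range layouts {
		// Layouts without a zone are interpreted in the local timezone,
		// matching the wording of the docs above.
		if t, err := time.ParseInLocation(layout, s, time.Local); err == nil {
			return t, nil
		} else {
			lastErr = err
		}
	}
	return time.Time{}, lastErr
}

func main() {
	for _, s := range []string{"2006-01-02T15:04:05Z", "2006-01-02 15:04:05", "2006-01-02"} {
		t, err := parseMaxAge(s)
		fmt.Println(t, err)
	}
}
```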
@@ -621,7 +603,7 @@ old or more.
|
||||
**Important** this flag is dangerous to your data - use with `--dry-run`
|
||||
and `-v` first.
|
||||
|
||||
In conjunction with `rclone sync`, `--delete-excluded` deletes any files
|
||||
In conjunction with `rclone sync` the `--delete-excluded deletes any files
|
||||
on the destination which are excluded from the command.
|
||||
|
||||
E.g. the scope of `rclone sync -i A: B:` can be restricted:
|
||||
|
||||
@@ -42,7 +42,6 @@ These flags are available for every command.
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--dscp DSCP Name or Value (default 0)
|
||||
--error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file (use - to read from stdin)
|
||||
@@ -151,7 +150,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.1")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
@@ -282,7 +281,7 @@ and may be set in the config file.
|
||||
--drive-starred-only Only show files that are starred.
|
||||
--drive-stop-on-download-limit Make download limit errors be fatal
|
||||
--drive-stop-on-upload-limit Make upload limit errors be fatal
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-team-drive string ID of the Shared Drive (Team Drive)
|
||||
--drive-token string OAuth Access Token as a JSON blob.
|
||||
--drive-token-url string Token server url.
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
@@ -563,6 +562,11 @@ and may be set in the config file.
|
||||
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
|
||||
--yandex-token string OAuth Access Token as a JSON blob.
|
||||
--yandex-token-url string Token server url.
|
||||
--zoho-auth-url string Auth server URL.
|
||||
--zoho-client-id string OAuth Client Id
|
||||
--zoho-client-secret string OAuth Client Secret
|
||||
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
|
||||
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
--zoho-token string OAuth Access Token as a JSON blob.
|
||||
--zoho-token-url string Token server url.
|
||||
```
|
||||
|
||||
@@ -181,7 +181,7 @@ kill %1

## Install from source ##

Make sure you have at least [Go](https://golang.org/) go1.13
Make sure you have at least [Go](https://golang.org/) 1.12
installed. [Download go](https://golang.org/dl/) if necessary. The
latest release is recommended. Then

@@ -447,21 +447,6 @@ to override the default choice.
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --local-no-preallocate
|
||||
|
||||
Disable preallocation of disk space for transferred files
|
||||
|
||||
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||
However, some virtual filesystem layers (such as Google Drive File
|
||||
Stream) may incorrectly set the actual file size equal to the
|
||||
preallocated space, causing checksum and file size checks to fail.
|
||||
Use this flag to disable preallocation.
|
||||
|
||||
- Config: no_preallocate
|
||||
- Env Var: RCLONE_LOCAL_NO_PREALLOCATE
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --local-no-sparse
|
||||
|
||||
Disable sparse files for multi-thread downloads
|
||||
|
||||
@@ -6,7 +6,7 @@ description: "Mail.ru Cloud"
{{< icon "fas fa-at" >}} Mail.ru Cloud
----------------------------------------

[Mail.ru Cloud](https://cloud.mail.ru/) is a cloud storage provided by a Russian internet company [Mail.Ru Group](https://mail.ru). The official desktop client is [Disk-O:](https://disk-o.cloud/en), available on Windows and Mac OS.
[Mail.ru Cloud](https://cloud.mail.ru/) is a cloud storage provided by a Russian internet company [Mail.Ru Group](https://mail.ru). The official desktop client is [Disk-O:](https://disk-o.cloud/), available only on Windows. (Please note that official sites are in Russian)

Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclone until it gets eventually implemented.

@@ -330,7 +330,7 @@ upon backend specific capabilities.
|
||||
|
||||
| Name | Purge | Copy | Move | DirMove | CleanUp | ListR | StreamUpload | LinkSharing | About | EmptyDir |
|
||||
| ---------------------------- |:-----:|:----:|:----:|:-------:|:-------:|:-----:|:------------:|:------------:|:-----:| :------: |
|
||||
| 1Fichier | No | Yes | Yes | No | No | No | No | No | No | Yes |
|
||||
| 1Fichier | No | No | No | No | No | No | No | No | No | Yes |
|
||||
| Amazon Drive | Yes | No | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes |
|
||||
| Amazon S3 | No | Yes | No | No | Yes | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
|
||||
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No |
|
||||
|
||||
@@ -203,6 +203,8 @@ Rather than
|
||||
rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Special parameters
|
||||
|
||||
The rc interface supports some special parameters which apply to
|
||||
@@ -273,69 +275,6 @@ $ rclone rc job/list
|
||||
}
|
||||
```
|
||||
|
||||
### Setting config flags with _config

If you wish to set config (the equivalent of the global flags) for the
duration of an rc call only then pass in the `_config` parameter.

This should be in the same format as the `config` key returned by
[options/get](#options-get).

For example, if you wished to run a sync with the `--checksum`
parameter, you would pass this parameter in your JSON blob.

    "_config":{"CheckSum": true}

If using `rclone rc` this could be passed as

    rclone rc operations/sync ... _config='{"CheckSum": true}'

Any config parameters you don't set will inherit the global defaults
which were set with command line flags or environment variables.

Note that it is possible to set some values as strings or integers -
see [data types](/#data-types) for more info. Here is an example
setting the equivalent of `--buffer-size` in string or integer format.

    "_config":{"BufferSize": "42M"}
    "_config":{"BufferSize": 44040192}

If you wish to check the `_config` assignment has worked properly then
calling `options/local` will show what the value got set to.

### Setting filter flags with _filter

If you wish to set filters for the duration of an rc call only then
pass in the `_filter` parameter.

This should be in the same format as the `filter` key returned by
[options/get](#options-get).

For example, if you wished to run a sync with these flags

    --max-size 1M --max-age 42s --include "a" --include "b"

you would pass this parameter in your JSON blob.

    "_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}

If using `rclone rc` this could be passed as

    rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'

Any filter parameters you don't set will inherit the global defaults
which were set with command line flags or environment variables.

Note that it is possible to set some values as strings or integers -
see [data types](/#data-types) for more info. Here is an example
setting the equivalent of `--buffer-size` in string or integer format.

    "_filter":{"MinSize": "42M"}
    "_filter":{"MinSize": 44040192}

If you wish to check the `_filter` assignment has worked properly then
calling `options/local` will show what the value got set to.

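Because `_config` and `_filter` are just extra keys in the JSON body, the same thing works over the plain HTTP interface. A hedged sketch in Go, assuming an `rclone rcd` listening on the default `localhost:5572` and using the `sync/sync` endpoint; the remote names are placeholders:

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// One-off sync with --checksum behaviour and a size/age filter,
	// without touching the global flags of the running rclone rcd.
	body, _ := json.Marshal(map[string]interface{}{
		"srcFs":   "drive:src", // hypothetical remotes
		"dstFs":   "drive:dst",
		"_config": map[string]interface{}{"CheckSum": true},
		"_filter": map[string]interface{}{"MaxSize": "1M", "MaxAge": "42s"},
	})

	resp, err := http.Post("http://localhost:5572/sync/sync", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```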
### Assigning operations to groups with _group = value
|
||||
|
||||
Each rc call has its own stats group for tracking its metrics. By default
|
||||
@@ -355,29 +294,6 @@ $ rclone rc --json '{ "group": "job/1" }' core/stats
|
||||
}
|
||||
```
|
||||
|
||||
## Data types {#data-types}

When the API returns types, these will mostly be straight forward
integer, string or boolean types.

However some of the types returned by the [options/get](#options-get)
call and taken by the [options/set](#options-set) calls as well as the
`vfsOpt`, `mountOpt` and the `_config` parameters.

- `Duration` - these are returned as an integer duration in
  nanoseconds. They may be set as an integer, or they may be set with
  time string, eg "5s". See the [options section](/docs/#options) for
  more info.
- `Size` - these are returned as an integer number of bytes. They may
  be set as an integer or they may be set with a size suffix string,
  eg "10M". See the [options section](/docs/#options) for more info.
- Enumerated type (such as `CutoffMode`, `DumpFlags`, `LogLevel`,
  `VfsCacheMode` - these will be returned as an integer and may be set
  as an integer but more conveniently they can be set as a string, eg
  "HARD" for `CutoffMode` or `DEBUG` for `LogLevel`.
- `BandwidthSpec` - this will be set and returned as a string, eg
  "1M".

## Supported commands
|
||||
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
|
||||
### backend/command: Runs a backend command. {#backend-command}
|
||||
@@ -1207,6 +1123,7 @@ This takes the following parameters
|
||||
- fs - a remote name string e.g. "drive:"
|
||||
- remote - a path within that remote e.g. "dir"
|
||||
- each part in body represents a file to be uploaded
|
||||
See the [uploadfile command](/commands/rclone_uploadfile/) command for more information on the above.
|
||||
|
||||
**Authentication is required for this call.**
|
||||
|
||||
@@ -1238,18 +1155,17 @@ changed like this.

For example:

This sets DEBUG level logs (-vv) (these can be set by number or string)
This sets DEBUG level logs (-vv)

    rclone rc options/set --json '{"main": {"LogLevel": "DEBUG"}}'
    rclone rc options/set --json '{"main": {"LogLevel": 8}}'

And this sets INFO level logs (-v)

    rclone rc options/set --json '{"main": {"LogLevel": "INFO"}}'
    rclone rc options/set --json '{"main": {"LogLevel": 7}}'

And this sets NOTICE level logs (normal without -v)

    rclone rc options/set --json '{"main": {"LogLevel": "NOTICE"}}'
    rclone rc options/set --json '{"main": {"LogLevel": 6}}'

### pluginsctl/addPlugin: Add a plugin using url {#pluginsctl-addPlugin}
|
||||
|
||||
@@ -1372,6 +1288,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
|
||||
|
||||
See the [copy command](/commands/rclone_copy/) command for more information on the above.
|
||||
@@ -1384,6 +1301,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
- deleteEmptySrcDirs - delete empty src directories if set
|
||||
|
||||
|
||||
@@ -1397,6 +1315,7 @@ This takes the following parameters
|
||||
|
||||
- srcFs - a remote name string e.g. "drive:src" for the source
|
||||
- dstFs - a remote name string e.g. "drive:dst" for the destination
|
||||
- createEmptySrcDirs - create empty src directories on destination if set
|
||||
|
||||
|
||||
See the [sync command](/commands/rclone_sync/) command for more information on the above.
|
||||
|
||||
@@ -45,11 +45,9 @@ Choose a number from below, or type in your own value
|
||||
\ "nextcloud"
|
||||
2 / Owncloud
|
||||
\ "owncloud"
|
||||
3 / Sharepoint Online, authenticated by Microsoft account.
|
||||
3 / Sharepoint
|
||||
\ "sharepoint"
|
||||
4 / Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
|
||||
\ "sharepoint-ntlm"
|
||||
5 / Other site/service or software
|
||||
4 / Other site/service or software
|
||||
\ "other"
|
||||
vendor> 1
|
||||
User name
|
||||
@@ -138,8 +136,6 @@ Name of the Webdav site/service/software you are using
|
||||
- Owncloud
|
||||
- "sharepoint"
|
||||
- Sharepoint
|
||||
- "sharepoint-ntlm"
|
||||
- Sharepoint with NTLM authentication
|
||||
- "other"
|
||||
- Other site/service or software
|
||||
|
||||
@@ -152,8 +148,6 @@ User name
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
In case vendor mode `sharepoint-ntlm` is used, the user name is in the form `DOMAIN\user`
|
||||
|
||||
#### --webdav-pass
|
||||
|
||||
Password.
|
||||
@@ -207,7 +201,7 @@ This is configured in an identical way to Owncloud. Note that
|
||||
Nextcloud initially did not support streaming of files (`rcat`) whereas
|
||||
Owncloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
|
||||
|
||||
### Sharepoint Online ###
|
||||
### Sharepoint ###
|
||||
|
||||
Rclone can be used with Sharepoint provided by OneDrive for Business
|
||||
or Office365 Education Accounts.
|
||||
@@ -243,40 +237,11 @@ Your config file should look like this:
|
||||
[sharepoint]
|
||||
type = webdav
|
||||
url = https://[YOUR-DOMAIN]-my.sharepoint.com/personal/[YOUR-EMAIL]/Documents
|
||||
vendor = sharepoint
|
||||
vendor = other
|
||||
user = YourEmailAddress
|
||||
pass = encryptedpassword
|
||||
```
|
||||
|
||||
### Sharepoint with NTLM Authentication ###
|
||||
|
||||
Use this option in case your (hosted) Sharepoint is not tied to OneDrive accounts and uses NTLM authentication.
|
||||
|
||||
To get the `url` configuration, similarly to the above, first navigate to the desired directory in your browser to get the URL,
|
||||
then strip everything after the name of the opened directory.
|
||||
|
||||
Example:
|
||||
If the URL is:
|
||||
https://example.sharepoint.com/sites/12345/Documents/Forms/AllItems.aspx
|
||||
|
||||
The configuration to use would be:
|
||||
https://example.sharepoint.com/sites/12345/Documents
|
||||
|
||||
Set the `vendor` to `sharepoint-ntlm`.
|
||||
|
||||
NTLM uses domain and user name combination for authentication,
|
||||
set `user` to `DOMAIN\username`.
|
||||
|
||||
Your config file should look like this:
|
||||
|
||||
```
|
||||
[sharepoint]
|
||||
type = webdav
|
||||
url = https://[YOUR-DOMAIN]/some-path-to/Documents
|
||||
vendor = sharepoint-ntlm
|
||||
user = DOMAIN\user
|
||||
pass = encryptedpassword
|
||||
```
|
||||
#### Required Flags for SharePoint ####
|
||||
As SharePoint does some special things with uploaded documents, you won't be able to use the documents size or the documents hash to compare if a file has been changed since the upload / which file is newer.
|
||||
|
||||
|
||||
@@ -76,12 +76,11 @@ y/e/d>
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Rclone runs a webserver on your local computer to collect the
|
||||
authorization token from Zoho Workdrive. This is only from the moment
|
||||
your browser is opened until the token is returned.
|
||||
The webserver runs on `http://127.0.0.1:53682/`.
|
||||
If local port `53682` is protected by a firewall you may need to temporarily
|
||||
unblock the firewall to complete authorization.
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Zoho Workdrive. This only runs from the moment it
|
||||
opens your browser to the moment you get back the verification code.
|
||||
This is on `http://127.0.0.1:53682/` and this it may require you to
|
||||
unblock it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
@@ -128,6 +127,26 @@ from filenames during upload.
|
||||
|
||||
Here are the standard options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-client-id
|
||||
|
||||
OAuth Client Id
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-client-secret
|
||||
|
||||
OAuth Client Secret
|
||||
Leave blank normally.
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ZOHO_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-region
|
||||
|
||||
Zoho region to connect to. You'll have to use the region you organization is registered in.
|
||||
@@ -150,6 +169,35 @@ Zoho region to connect to. You'll have to use the region you organization is reg
|
||||
|
||||
Here are the advanced options specific to zoho (Zoho).
|
||||
|
||||
#### --zoho-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ZOHO_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-auth-url
|
||||
|
||||
Auth server URL.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ZOHO_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-token-url
|
||||
|
||||
Token server url.
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ZOHO_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --zoho-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
|
||||
@@ -1 +1 @@
v1.55.0
v1.54.2

@@ -34,6 +34,6 @@ func TestLimitTPS(t *testing.T) {
			tpsBucket = nil
		}()

		timeTransactions(100, 900*time.Millisecond, 5000*time.Millisecond)
		timeTransactions(100, 900*time.Millisecond, 2000*time.Millisecond)
	})
}

@@ -1,7 +1,6 @@
package fs

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
@@ -265,19 +264,3 @@ func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
func (x BwTimetable) Type() string {
	return "BwTimetable"
}

// UnmarshalJSON unmarshals a string value
func (x *BwTimetable) UnmarshalJSON(in []byte) error {
	var s string
	err := json.Unmarshal(in, &s)
	if err != nil {
		return err
	}
	return x.Set(s)
}

// MarshalJSON marshals as a string value
func (x BwTimetable) MarshalJSON() ([]byte, error) {
	s := x.String()
	return json.Marshal(s)
}

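Taken together, the two methods above let a `BwTimetable` round-trip through JSON as its human-readable string form, which is what the rc interface relies on; the tests below exercise exactly this. A tiny usage sketch against the master-branch `fs` package, where the methods still exist:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// opts is an illustrative struct; any field of type fs.BwTimetable works the same way.
type opts struct {
	BwLimit fs.BwTimetable `json:"BwLimit"`
}

func main() {
	var o opts
	// The string form is the same syntax as the --bwlimit flag.
	if err := json.Unmarshal([]byte(`{"BwLimit": "10:20,666k"}`), &o); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(o)
	fmt.Println(string(out)) // marshals back to the string form
}
```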
@@ -1,16 +1,16 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ flagger = (*BwTimetable)(nil)
|
||||
var _ pflag.Value = (*BwTimetable)(nil)
|
||||
|
||||
func TestBwTimetableSet(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
@@ -464,102 +464,3 @@ func TestBwTimetableLimitAt(t *testing.T) {
|
||||
assert.Equal(t, test.want, slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBwTimetableUnmarshalJSON(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want BwTimetable
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
`"Mon-10:20,bad"`,
|
||||
BwTimetable(nil),
|
||||
true,
|
||||
},
|
||||
{
|
||||
`"0"`,
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 0, Rx: 0}},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
`"666"`,
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
`"666:333"`,
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
`"10:20,666"`,
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
},
|
||||
false,
|
||||
},
|
||||
} {
|
||||
var bwt BwTimetable
|
||||
err := json.Unmarshal([]byte(test.in), &bwt)
|
||||
if test.err {
|
||||
require.Error(t, err, test.in)
|
||||
} else {
|
||||
require.NoError(t, err, test.in)
|
||||
}
|
||||
assert.Equal(t, test.want, bwt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBwTimetableMarshalJSON(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in BwTimetable
|
||||
want string
|
||||
}{
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 0, Rx: 0}},
|
||||
},
|
||||
`"0"`,
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
},
|
||||
`"666k"`,
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
||||
},
|
||||
`"666k:333k"`,
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||
},
|
||||
`"Sun-10:20,666k Mon-10:20,666k Tue-10:20,666k Wed-10:20,666k Thu-10:20,666k Fri-10:20,666k Sat-10:20,666k"`,
|
||||
},
|
||||
} {
|
||||
got, err := json.Marshal(test.in)
|
||||
require.NoError(t, err, test.want)
|
||||
assert.Equal(t, test.want, string(got))
|
||||
}
|
||||
}
|
||||
|
||||
fs/cache/cache.go (13 changes, vendored)
@@ -104,19 +104,6 @@ func Get(ctx context.Context, fsString string) (f fs.Fs, err error) {
|
||||
return GetFn(ctx, fsString, fs.NewFs)
|
||||
}
|
||||
|
||||
// GetArr gets []fs.Fs from []fsStrings either from the cache or creates it afresh
|
||||
func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {
|
||||
var fArr []fs.Fs
|
||||
for _, fsString := range fsStrings {
|
||||
f1, err1 := GetFn(ctx, fsString, fs.NewFs)
|
||||
if err1 != nil {
|
||||
return fArr, err1
|
||||
}
|
||||
fArr = append(fArr, f1)
|
||||
}
|
||||
return fArr, nil
|
||||
}
|
||||
|
||||
// Put puts an fs.Fs named fsString into the cache
|
||||
func Put(fsString string, f fs.Fs) {
|
||||
canonicalName := fs.ConfigString(f)
|
||||
|
||||
fs/config.go (13 changes)
@@ -76,8 +76,8 @@ type ConfigInfo struct {
|
||||
NoUnicodeNormalization bool
|
||||
NoUpdateModTime bool
|
||||
DataRateUnit string
|
||||
CompareDest []string
|
||||
CopyDest []string
|
||||
CompareDest string
|
||||
CopyDest string
|
||||
BackupDir string
|
||||
Suffix string
|
||||
SuffixKeepExtension bool
|
||||
@@ -122,7 +122,6 @@ type ConfigInfo struct {
|
||||
Headers []*HTTPOption
|
||||
RefreshTimes bool
|
||||
NoConsole bool
|
||||
TrafficClass uint8
|
||||
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
|
||||
@@ -164,14 +163,6 @@ func NewConfig() *ConfigInfo {
|
||||
return c
|
||||
}
|
||||
|
||||
// TimeoutOrInfinite returns ci.Timeout if > 0 or infinite otherwise
|
||||
func (c *ConfigInfo) TimeoutOrInfinite() time.Duration {
|
||||
if c.Timeout > 0 {
|
||||
return c.Timeout
|
||||
}
|
||||
return ModTimeNotSupported
|
||||
}
|
||||
|
||||
type configContextKeyType struct{}
|
||||
|
||||
// Context key for config
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"log"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -30,7 +29,6 @@ var (
|
||||
deleteAfter bool
|
||||
bindAddr string
|
||||
disableFeatures string
|
||||
dscp string
|
||||
uploadHeaders []string
|
||||
downloadHeaders []string
|
||||
headers []string
|
||||
@@ -81,8 +79,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless.")
|
||||
flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.")
|
||||
flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical.")
|
||||
flags.StringArrayVarP(flagSet, &ci.CompareDest, "compare-dest", "", nil, "Include additional comma separated server-side paths during comparison.")
|
||||
flags.StringArrayVarP(flagSet, &ci.CopyDest, "copy-dest", "", nil, "Implies --compare-dest but also copies files from paths into destination.")
|
||||
flags.StringVarP(flagSet, &ci.CompareDest, "compare-dest", "", ci.CompareDest, "Include additional server-side path during comparison.")
|
||||
flags.StringVarP(flagSet, &ci.CopyDest, "copy-dest", "", ci.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
|
||||
flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR.")
|
||||
flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files.")
|
||||
flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix.")
|
||||
@@ -127,7 +125,6 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
|
||||
flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
|
||||
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
|
||||
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
|
||||
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
|
||||
}
|
||||
|
||||
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions
|
||||
@@ -217,7 +214,7 @@ func SetFlags(ci *fs.ConfigInfo) {
|
||||
ci.DeleteMode = fs.DeleteModeDefault
|
||||
}
|
||||
|
||||
if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 {
|
||||
if ci.CompareDest != "" && ci.CopyDest != "" {
|
||||
log.Fatalf(`Can't use --compare-dest with --copy-dest.`)
|
||||
}
|
||||
|
||||
@@ -257,13 +254,6 @@ func SetFlags(ci *fs.ConfigInfo) {
|
||||
if len(headers) != 0 {
|
||||
ci.Headers = ParseHeaders(headers)
|
||||
}
|
||||
if len(dscp) != 0 {
|
||||
if value, ok := parseDSCP(dscp); ok {
|
||||
ci.TrafficClass = value << 2
|
||||
} else {
|
||||
log.Fatalf("--dscp: Invalid DSCP name: %v", dscp)
|
||||
}
|
||||
}
|
||||
|
||||
// Make the config file absolute
|
||||
configPath, err := filepath.Abs(config.ConfigPath)
|
||||
@@ -276,61 +266,3 @@ func SetFlags(ci *fs.ConfigInfo) {
|
||||
ci.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed
|
||||
|
||||
}
|
||||
|
||||
// parseHeaders converts DSCP names to value
|
||||
func parseDSCP(dscp string) (uint8, bool) {
|
||||
if s, err := strconv.ParseUint(dscp, 10, 6); err == nil {
|
||||
return uint8(s), true
|
||||
}
|
||||
dscp = strings.ToUpper(dscp)
|
||||
switch dscp {
|
||||
case "BE":
|
||||
fallthrough
|
||||
case "DF":
|
||||
fallthrough
|
||||
case "CS0":
|
||||
return 0x00, true
|
||||
case "CS1":
|
||||
return 0x08, true
|
||||
case "AF11":
|
||||
return 0x0A, true
|
||||
case "AF12":
|
||||
return 0x0C, true
|
||||
case "AF13":
|
||||
return 0x0E, true
|
||||
case "CS2":
|
||||
return 0x10, true
|
||||
case "AF21":
|
||||
return 0x12, true
|
||||
case "AF22":
|
||||
return 0x14, true
|
||||
case "AF23":
|
||||
return 0x16, true
|
||||
case "CS3":
|
||||
return 0x18, true
|
||||
case "AF31":
|
||||
return 0x1A, true
|
||||
case "AF32":
|
||||
return 0x1C, true
|
||||
case "AF33":
|
||||
return 0x1E, true
|
||||
case "CS4":
|
||||
return 0x20, true
|
||||
case "AF41":
|
||||
return 0x22, true
|
||||
case "AF42":
|
||||
return 0x24, true
|
||||
case "AF43":
|
||||
return 0x26, true
|
||||
case "CS5":
|
||||
return 0x28, true
|
||||
case "EF":
|
||||
return 0x2E, true
|
||||
case "CS6":
|
||||
return 0x30, true
|
||||
case "LE":
|
||||
return 0x01, true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,14 +47,3 @@ func (m *CutoffMode) Set(s string) error {
func (m *CutoffMode) Type() string {
	return "string"
}

// UnmarshalJSON makes sure the value can be parsed as a string or integer in JSON
func (m *CutoffMode) UnmarshalJSON(in []byte) error {
	return UnmarshalJSONFlag(in, m, func(i int64) error {
		if i < 0 || i >= int64(len(cutoffModeToString)) {
			return errors.Errorf("Out of range cutoff mode %d", i)
		}
		*m = (CutoffMode)(i)
		return nil
	})
}

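The method removed here is what let a `CutoffMode` arrive over the rc as either a JSON string or a JSON integer, as described in the data types note earlier in this diff. A small sketch of that behaviour, assuming the master-branch `fs` package where the method still exists:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	var byName, byNumber fs.CutoffMode

	// String form: matched against the known mode names, as in the rc docs
	// example of "HARD" for CutoffMode.
	if err := json.Unmarshal([]byte(`"HARD"`), &byName); err != nil {
		panic(err)
	}
	// Integer form: accepted via the range-checked callback shown above.
	if err := json.Unmarshal([]byte(`1`), &byNumber); err != nil {
		panic(err)
	}

	fmt.Println(byName, byNumber) // prints the textual names of both modes
}
```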