mirror of https://github.com/rclone/rclone.git
synced 2026-01-22 04:13:14 +00:00

Compare commits: 28 commits, `v1.53.3` ... `fix-ftp-PR`
| Author | SHA1 | Date |
|---|---|---|
|  | f8436468ae |  |
|  | a2a94344f4 |  |
|  | 6a56ac1032 |  |
|  | 96299629b4 |  |
|  | 75de30cfa8 |  |
|  | 233bed6a73 |  |
|  | b3964efe4d |  |
|  | 575f061629 |  |
|  | 640d7d3b4e |  |
|  | e92294b482 |  |
|  | 22937e8982 |  |
|  | c3d1474eb9 |  |
|  | e2426ea87b |  |
|  | e58a61175f |  |
|  | 05bea46c3e |  |
|  | c8a719ae0d |  |
|  | c3884aafd9 |  |
|  | 0a9785a4ff |  |
|  | 8140f67092 |  |
|  | 4a001b8a02 |  |
|  | 525433e6dd |  |
|  | f71f6c57d7 |  |
|  | e35623c72e |  |
|  | 344bce7e2a |  |
|  | 3a4322a7ba |  |
|  | 27b9ae4fc3 |  |
|  | 7e2488af10 |  |
|  | 41ecb586c4 |  |
**.github/workflows/build.yml** (vendored, 18 changed lines)
````diff
@@ -107,10 +107,10 @@ jobs:
     - name: Set environment variables
       shell: bash
       run: |
-        echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
-        echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
-        if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
-        if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
+        echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
+        echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
+        if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
+        if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
 
     - name: Install Libraries on Linux
       shell: bash
@@ -124,8 +124,6 @@ jobs:
     - name: Install Libraries on macOS
       shell: bash
       run: |
-        brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
-        brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
         brew update
         brew cask install osxfuse
       if: matrix.os == 'macOS-latest'
@@ -135,10 +133,10 @@ jobs:
       run: |
        $ProgressPreference = 'SilentlyContinue'
        choco install -y winfsp zip
-        echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
+        Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
        if ($env:GOARCH -eq "386") {
          choco install -y mingw --forcex86 --force
-          echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
        }
        # Copy mingw32-make.exe to make.exe so the same command line
        # can be used on Windows as on macOS and Linux
@@ -225,8 +223,8 @@ jobs:
     - name: Set environment variables
       shell: bash
       run: |
-        echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
-        echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
+        echo '::set-env name=GOPATH::${{ runner.workspace }}'
+        echo '::add-path::${{ runner.workspace }}/bin'
 
     - name: Cross-compile rclone
       run: |
````
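These hunks restore the deprecated `::set-env` / `::add-path` workflow commands, since this branch predates GitHub's switch to the `$GITHUB_ENV` and `$GITHUB_PATH` files. As a rough illustration of the newer mechanism (my sketch, not part of this diff), a Go tool running inside a workflow step could export a variable by appending a `KEY=value` line to the file named by `GITHUB_ENV`:

```go
package main

import (
	"fmt"
	"os"
)

// exportEnv appends KEY=value to the file named by $GITHUB_ENV so that
// later workflow steps see the variable. Outside of GitHub Actions the
// variable is unset, so we fall back to printing the assignment.
func exportEnv(key, value string) error {
	path := os.Getenv("GITHUB_ENV")
	if path == "" {
		fmt.Printf("%s=%s\n", key, value) // not running under Actions
		return nil
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	// "cmount" is one of rclone's real build tags, used here only as sample data.
	if err := exportEnv("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```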
**MANUAL.html** (generated, 17481 changed lines)

File diff suppressed because one or more lines are too long
**MANUAL.md** (generated, 583 changed lines)
````diff
@@ -1,6 +1,6 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Nov 19, 2020
+% Sep 02, 2020
 
 # Rclone syncs your files to cloud storage
 
@@ -146,7 +146,6 @@ WebDAV or S3, that work out of the box.)
 - StackPath
 - SugarSync
 - Tardigrade
-- Tencent Cloud Object Storage (COS)
 - Wasabi
 - WebDAV
 - Yandex Disk
@@ -504,7 +503,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone copy
 
-Copy files from source to dest, skipping already copied.
+Copy files from source to dest, skipping already copied
 
 ## Synopsis
 
@@ -834,7 +833,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.
 
-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -860,7 +859,6 @@ rclone check source:path dest:path [flags]
 ```
       --combined string   Make a combined report of changes to this file
       --differ string     Report all non-matching files to this file
-      --download          Check by downloading rather than with hash.
       --error string      Report all files with errors (hashing or reading) to this file
   -h, --help              help for check
       --match string      Report all matching files to this file
@@ -1193,7 +1191,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone cleanup
 
-Clean up the remote if possible.
+Clean up the remote if possible
 
 ## Synopsis
 
@@ -1917,7 +1915,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone copyto
 
-Copy files from source to dest, skipping already copied.
+Copy files from source to dest, skipping already copied
 
 ## Synopsis
 
@@ -2042,7 +2040,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.
 
-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -2436,7 +2434,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone lsf
 
-List directories and objects in remote:path formatted for parsing.
+List directories and objects in remote:path formatted for parsing
 
 ## Synopsis
 
@@ -2746,9 +2744,6 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount
 
-**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
-or newer on some platforms depending on the underlying FUSE library in use.
-
 ## Installing on Windows
 
 To run rclone mount on Windows, you will need to
@@ -2891,6 +2886,9 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
 
+Chunked reading will only work with --vfs-cache-mode < full, as the file will always
+be copied to the vfs cache before opening with --vfs-cache-mode full.
+
 ## VFS - Virtual File System
 
 This command uses the VFS layer. This adapts the cloud storage objects
````
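The chunk-size doubling described in that last hunk is easy to check; here is a minimal Go sketch (mine, not rclone's code) that reproduces the documented boundaries:

```go
package main

import "fmt"

// chunkRanges mimics the documented --vfs-read-chunk-size behaviour:
// each chunk is double the size of the previous one, capped at limit.
func chunkRanges(fileSize, chunkSize, limit int64) [][2]int64 {
	var ranges [][2]int64
	for off := int64(0); off < fileSize; {
		size := chunkSize
		if limit > 0 && size > limit {
			size = limit
		}
		end := off + size
		if end > fileSize {
			end = fileSize
		}
		ranges = append(ranges, [2]int64{off, end})
		off = end
		chunkSize *= 2 // next chunk is twice as big
	}
	return ranges
}

func main() {
	const M = 1 << 20
	// 100M initial chunk, 500M limit, 2000M file: prints
	// 0M-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M, 1700M-2000M
	for _, r := range chunkRanges(2000*M, 100*M, 500*M) {
		fmt.Printf("%dM-%dM\n", r[0]/M, r[1]/M)
	}
}
```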
````diff
@@ -3054,11 +3052,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -3296,7 +3289,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone obscure
 
-Obscure password for use in the rclone config file.
+Obscure password for use in the rclone config file
 
 ## Synopsis
 
@@ -3761,11 +3754,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -4068,11 +4056,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -4531,11 +4514,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -5057,11 +5035,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -5529,11 +5502,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -6534,8 +6502,6 @@ This can be useful for tracking down problems with syncs in
 combination with the `-v` flag. See the [Logging section](#logging)
 for more info.
 
-If FILE exists then rclone will append to it.
-
 Note that if you are using the `logrotate` program to manage rclone's
 logs, then you should use the `copytruncate` option as rclone doesn't
 have a signal to rotate logs.
@@ -7030,17 +6996,11 @@ or with `--backup-dir`. See `--backup-dir` for more info.
 
 For example
 
-    rclone copy -i /path/to/local/file remote:current --suffix .bak
+    rclone sync -i /path/to/local/file remote:current --suffix .bak
 
-will copy `/path/to/local` to `remote:current`, but for any files
+will sync `/path/to/local` to `remote:current`, but for any files
 which would have been updated or deleted have .bak added.
 
-If using `rclone sync` with `--suffix` and without `--backup-dir` then
-it is recommended to put a filter rule in excluding the suffix
-otherwise the `sync` will delete the backup files.
-
-    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
-
 ### --suffix-keep-extension ###
 
 When using `--suffix`, setting this causes rclone put the SUFFIX
````
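For the `--suffix-keep-extension` flag mentioned at the end of that hunk, the documented naming can be sketched as follows (an illustrative approximation, not rclone's implementation):

```go
package main

import (
	"fmt"
	"path"
)

// backupName returns the name a file gets when rclone renames it into a
// backup with --suffix. With keepExtension the suffix is inserted before
// the extension, keeping the file type recognisable.
func backupName(name, suffix string, keepExtension bool) string {
	if !keepExtension {
		return name + suffix
	}
	ext := path.Ext(name)
	base := name[:len(name)-len(ext)]
	return base + suffix + ext
}

func main() {
	fmt.Println(backupName("report.txt", ".bak", false)) // report.txt.bak
	fmt.Println(backupName("report.txt", ".bak", true))  // report.bak.txt
}
```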
````diff
@@ -8933,8 +8893,6 @@ OR
     "result": "<Raw command line output>"
 }
 
-```
-
 **Authentication is required for this call.**
 
 ### core/gc: Runs a garbage collection. {#core-gc}
@@ -9610,7 +9568,7 @@ This allows you to remove a plugin using it's name
 
 This takes parameters
 
-- name: name of the plugin in the format `author`/`plugin_name`
+- name: name of the plugin in the format <author>/<plugin_name>
 
 Eg
 
@@ -9624,7 +9582,7 @@ This allows you to remove a plugin using it's name
 
 This takes the following parameters
 
-- name: name of the plugin in the format `author`/`plugin_name`
+- name: name of the plugin in the format <author>/<plugin_name>
 
 Eg
 
@@ -10569,7 +10527,7 @@ These flags are available for every command.
       --use-json-log                 Use json log format.
       --use-mmap                     Use mmap allocator (see docs).
       --use-server-modtime           Use server modified time instead of object metadata
-      --user-agent string            Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.3")
+      --user-agent string            Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
   -v, --verbose count                Print lots more stuff (repeat for more)
 ```
 
@@ -10668,7 +10626,7 @@ and may be set in the config file.
       --drive-auth-owner-only        Only consider files owned by the authenticated user.
       --drive-auth-url string        Auth server URL.
       --drive-chunk-size SizeSuffix  Upload chunk size. Must a power of 2 >= 256k. (default 8M)
-      --drive-client-id string       Google Application Client Id
+      --drive-client-id string       OAuth Client Id
      --drive-client-secret string   OAuth Client Secret
      --drive-disable-http2          Disable drive using http2 (default true)
      --drive-encoding MultiEncoder  This sets the encoding for the backend. (default InvalidUtf8)
@@ -11517,7 +11475,6 @@ The S3 backend can be used with a number of different providers:
 - Minio
 - Scaleway
 - StackPath
-- Tencent Cloud Object Storage (COS)
 - Wasabi
 
 
@@ -11638,7 +11595,7 @@ Choose a number from below, or type in your own value
    / Asia Pacific (Mumbai)
 13 | Needs location constraint ap-south-1.
    \ "ap-south-1"
-   / Asia Pacific (Hong Kong) Region
+   / Asia Patific (Hong Kong) Region
 14 | Needs location constraint ap-east-1.
    \ "ap-east-1"
    / South America (Sao Paulo) Region
@@ -11955,7 +11912,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
 
 ### Standard Options
 
-Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
+Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
 
 #### --s3-provider
 
@@ -11986,8 +11943,6 @@ Choose your S3 provider.
         - Scaleway Object Storage
     - "StackPath"
         - StackPath Object Storage
-    - "TencentCOS"
-        - Tencent Cloud Object Storage (COS)
     - "Wasabi"
         - Wasabi Object Storage
     - "Other"
@@ -12044,12 +11999,12 @@ Region to connect to.
     - "us-east-2"
         - US East (Ohio) Region
        - Needs location constraint us-east-2.
-    - "us-west-1"
-        - US West (Northern California) Region
-        - Needs location constraint us-west-1.
     - "us-west-2"
        - US West (Oregon) Region
        - Needs location constraint us-west-2.
+    - "us-west-1"
+        - US West (Northern California) Region
+        - Needs location constraint us-west-1.
     - "ca-central-1"
        - Canada (Central) Region
        - Needs location constraint ca-central-1.
@@ -12059,15 +12014,9 @@ Region to connect to.
     - "eu-west-2"
        - EU (London) Region
        - Needs location constraint eu-west-2.
-    - "eu-west-3"
-        - EU (Paris) Region
-        - Needs location constraint eu-west-3.
     - "eu-north-1"
        - EU (Stockholm) Region
        - Needs location constraint eu-north-1.
-    - "eu-south-1"
-        - EU (Milan) Region
-        - Needs location constraint eu-south-1.
     - "eu-central-1"
        - EU (Frankfurt) Region
        - Needs location constraint eu-central-1.
@@ -12083,36 +12032,15 @@ Region to connect to.
     - "ap-northeast-2"
        - Asia Pacific (Seoul)
        - Needs location constraint ap-northeast-2.
-    - "ap-northeast-3"
-        - Asia Pacific (Osaka-Local)
-        - Needs location constraint ap-northeast-3.
     - "ap-south-1"
        - Asia Pacific (Mumbai)
        - Needs location constraint ap-south-1.
     - "ap-east-1"
-        - Asia Pacific (Hong Kong) Region
+        - Asia Patific (Hong Kong) Region
        - Needs location constraint ap-east-1.
     - "sa-east-1"
        - South America (Sao Paulo) Region
        - Needs location constraint sa-east-1.
-    - "me-south-1"
-        - Middle East (Bahrain) Region
-        - Needs location constraint me-south-1.
-    - "af-south-1"
-        - Africa (Cape Town) Region
-        - Needs location constraint af-south-1.
-    - "cn-north-1"
-        - China (Beijing) Region
-        - Needs location constraint cn-north-1.
-    - "cn-northwest-1"
-        - China (Ningxia) Region
-        - Needs location constraint cn-northwest-1.
-    - "us-gov-east-1"
-        - AWS GovCloud (US-East) Region
-        - Needs location constraint us-gov-east-1.
-    - "us-gov-west-1"
-        - AWS GovCloud (US) Region
-        - Needs location constraint us-gov-west-1.
 
 #### --s3-region
 
@@ -12368,54 +12296,6 @@ Endpoint for StackPath Object Storage.
 
-#### --s3-endpoint
-
-Endpoint for Tencent COS API.
-
-- Config: endpoint
-- Env Var: RCLONE_S3_ENDPOINT
-- Type: string
-- Default: ""
-- Examples:
-    - "cos.ap-beijing.myqcloud.com"
-        - Beijing Region.
-    - "cos.ap-nanjing.myqcloud.com"
-        - Nanjing Region.
-    - "cos.ap-shanghai.myqcloud.com"
-        - Shanghai Region.
-    - "cos.ap-guangzhou.myqcloud.com"
-        - Guangzhou Region.
-    - "cos.ap-nanjing.myqcloud.com"
-        - Nanjing Region.
-    - "cos.ap-chengdu.myqcloud.com"
-        - Chengdu Region.
-    - "cos.ap-chongqing.myqcloud.com"
-        - Chongqing Region.
-    - "cos.ap-hongkong.myqcloud.com"
-        - Hong Kong (China) Region.
-    - "cos.ap-singapore.myqcloud.com"
-        - Singapore Region.
-    - "cos.ap-mumbai.myqcloud.com"
-        - Mumbai Region.
-    - "cos.ap-seoul.myqcloud.com"
-        - Seoul Region.
-    - "cos.ap-bangkok.myqcloud.com"
-        - Bangkok Region.
-    - "cos.ap-tokyo.myqcloud.com"
-        - Tokyo Region.
-    - "cos.na-siliconvalley.myqcloud.com"
-        - Silicon Valley Region.
-    - "cos.na-ashburn.myqcloud.com"
-        - Virginia Region.
-    - "cos.na-toronto.myqcloud.com"
-        - Toronto Region.
-    - "cos.eu-frankfurt.myqcloud.com"
-        - Frankfurt Region.
-    - "cos.eu-moscow.myqcloud.com"
-        - Moscow Region.
-    - "cos.accelerate.myqcloud.com"
-        - Use Tencent COS Accelerate Endpoint.
-
 #### --s3-endpoint
 
 Endpoint for S3 API.
 Required when using an S3 clone.
 
@@ -12453,22 +12333,18 @@ Used when creating buckets only.
     - Empty for US Region, Northern Virginia or Pacific Northwest.
     - "us-east-2"
        - US East (Ohio) Region.
-    - "us-west-1"
-        - US West (Northern California) Region.
     - "us-west-2"
        - US West (Oregon) Region.
+    - "us-west-1"
+        - US West (Northern California) Region.
     - "ca-central-1"
        - Canada (Central) Region.
     - "eu-west-1"
        - EU (Ireland) Region.
     - "eu-west-2"
        - EU (London) Region.
-    - "eu-west-3"
-        - EU (Paris) Region.
     - "eu-north-1"
        - EU (Stockholm) Region.
-    - "eu-south-1"
-        - EU (Milan) Region.
     - "EU"
        - EU Region.
     - "ap-southeast-1"
@@ -12478,27 +12354,13 @@ Used when creating buckets only.
     - "ap-northeast-1"
        - Asia Pacific (Tokyo) Region.
     - "ap-northeast-2"
-        - Asia Pacific (Seoul) Region.
-    - "ap-northeast-3"
-        - Asia Pacific (Osaka-Local) Region.
+        - Asia Pacific (Seoul)
     - "ap-south-1"
-        - Asia Pacific (Mumbai) Region.
+        - Asia Pacific (Mumbai)
     - "ap-east-1"
-        - Asia Pacific (Hong Kong) Region.
+        - Asia Pacific (Hong Kong)
     - "sa-east-1"
        - South America (Sao Paulo) Region.
-    - "me-south-1"
-        - Middle East (Bahrain) Region.
-    - "af-south-1"
-        - Africa (Cape Town) Region.
-    - "cn-north-1"
-        - China (Beijing) Region
-    - "cn-northwest-1"
-        - China (Ningxia) Region.
-    - "us-gov-east-1"
-        - AWS GovCloud (US-East) Region.
-    - "us-gov-west-1"
-        - AWS GovCloud (US) Region.
 
 #### --s3-location-constraint
 
@@ -12601,8 +12463,6 @@ doesn't copy the ACL from the source but rather writes a fresh one.
 - Type: string
 - Default: ""
 - Examples:
-    - "default"
-        - Owner gets Full_CONTROL. No one else has access rights (default).
     - "private"
        - Owner gets FULL_CONTROL. No one else has access rights (default).
     - "public-read"
@@ -12703,24 +12563,6 @@ The storage class to use when storing new objects in OSS.
 
-#### --s3-storage-class
-
-The storage class to use when storing new objects in Tencent COS.
-
-- Config: storage_class
-- Env Var: RCLONE_S3_STORAGE_CLASS
-- Type: string
-- Default: ""
-- Examples:
-    - ""
-        - Default
-    - "STANDARD"
-        - Standard storage class
-    - "ARCHIVE"
-        - Archive storage mode.
-    - "STANDARD_IA"
-        - Infrequent access storage mode.
-
 #### --s3-storage-class
 
 The storage class to use when storing new objects in S3.
 
 - Config: storage_class
@@ -12737,7 +12579,7 @@ The storage class to use when storing new objects in S3.
 
 ### Advanced Options
 
-Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
+Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
 
 #### --s3-bucket-acl
 
@@ -12958,7 +12800,7 @@ if false then rclone will use virtual path style. See [the AWS S3
 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
 for more info.
 
-Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
+Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
 false - rclone will do this automatically based on the provider
 setting.
````
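The force-path-style hunk above refers to the two S3 URL conventions; a hedged sketch of the difference (illustrative only, not rclone code):

```go
package main

import "fmt"

// bucketURL shows the two addressing styles the --s3-force-path-style
// option switches between.
func bucketURL(endpoint, bucket, key string, pathStyle bool) string {
	if pathStyle {
		// path style: the bucket is part of the URL path
		return fmt.Sprintf("https://%s/%s/%s", endpoint, bucket, key)
	}
	// virtual hosted style: the bucket is part of the host name
	return fmt.Sprintf("https://%s.%s/%s", bucket, endpoint, key)
}

func main() {
	fmt.Println(bucketURL("s3.us-east-1.amazonaws.com", "mybucket", "file.txt", true))
	fmt.Println(bucketURL("s3.us-east-1.amazonaws.com", "mybucket", "file.txt", false))
}
```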
````diff
@@ -13827,138 +13669,6 @@ d) Delete this remote
 y/e/d> y
 ```
 
-### Tencent COS {#tencent-cos}
-
-[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.
-
-To configure access to Tencent COS, follow the steps below:
-
-1. Run `rclone config` and select `n` for a new remote.
-
-```
-rclone config
-No remotes found - make a new one
-n) New remote
-s) Set configuration password
-q) Quit config
-n/s/q> n
-```
-
-2. Give the name of the configuration. For example, name it 'cos'.
-
-```
-name> cos
-```
-
-3. Select `s3` storage.
-
-```
-Choose a number from below, or type in your own value
-1 / 1Fichier
-   \ "fichier"
- 2 / Alias for an existing remote
-   \ "alias"
- 3 / Amazon Drive
-   \ "amazon cloud drive"
- 4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
-   \ "s3"
-[snip]
-Storage> s3
-```
-
-4. Select `TencentCOS` provider.
-```
-Choose a number from below, or type in your own value
-1 / Amazon Web Services (AWS) S3
-   \ "AWS"
-[snip]
-11 / Tencent Cloud Object Storage (COS)
-   \ "TencentCOS"
-[snip]
-provider> TencentCOS
-```
-
-5. Enter your SecretId and SecretKey of Tencent Cloud.
-
-```
-Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
-Only applies if access_key_id and secret_access_key is blank.
-Enter a boolean value (true or false). Press Enter for the default ("false").
-Choose a number from below, or type in your own value
- 1 / Enter AWS credentials in the next step
-   \ "false"
- 2 / Get AWS credentials from the environment (env vars or IAM)
-   \ "true"
-env_auth> 1
-AWS Access Key ID.
-Leave blank for anonymous access or runtime credentials.
-Enter a string value. Press Enter for the default ("").
-access_key_id> AKIDxxxxxxxxxx
-AWS Secret Access Key (password)
-Leave blank for anonymous access or runtime credentials.
-Enter a string value. Press Enter for the default ("").
-secret_access_key> xxxxxxxxxxx
-```
-
-6. Select endpoint for Tencent COS. This is the standard endpoint for different region.
-
-```
- 1 / Beijing Region.
-   \ "cos.ap-beijing.myqcloud.com"
- 2 / Nanjing Region.
-   \ "cos.ap-nanjing.myqcloud.com"
- 3 / Shanghai Region.
-   \ "cos.ap-shanghai.myqcloud.com"
- 4 / Guangzhou Region.
-   \ "cos.ap-guangzhou.myqcloud.com"
-[snip]
-endpoint> 4
-```
-
-7. Choose acl and storage class.
-
-```
-Note that this ACL is applied when server side copying objects as S3
-doesn't copy the ACL from the source but rather writes a fresh one.
-Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value
- 1 / Owner gets Full_CONTROL. No one else has access rights (default).
-   \ "default"
-[snip]
-acl> 1
-The storage class to use when storing new objects in Tencent COS.
-Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value
- 1 / Default
-   \ ""
-[snip]
-storage_class> 1
-Edit advanced config? (y/n)
-y) Yes
-n) No (default)
-y/n> n
-Remote config
---------------------
-[cos]
-type = s3
-provider = TencentCOS
-env_auth = false
-access_key_id = xxx
-secret_access_key = xxx
-endpoint = cos.ap-guangzhou.myqcloud.com
-acl = default
---------------------
-y) Yes this is OK (default)
-e) Edit this remote
-d) Delete this remote
-y/e/d> y
-Current remotes:
-
-Name                 Type
-====                 ====
-cos                  s3
-```
-
 ### Netease NOS ###
 
 For Netease NOS configure as per the configurator `rclone config`
@@ -14877,8 +14587,7 @@ Note that Box is case insensitive so you can't have a file called
 "Hello.doc" and one called "hello.doc".
 
 Box file names can't have the `\` character in. rclone maps this to
-and from an identical looking unicode equivalent `\` (U+FF3C Fullwidth
-Reverse Solidus).
+and from an identical looking unicode equivalent `\`.
 
 Box only supports filenames up to 255 characters in length.
 
@@ -16160,26 +15869,23 @@ See: the [encoding section in the overview](https://rclone.org/overview/#encodin
 Crypt
 ----------------------------------------
 
-Rclone `crypt` remotes encrypt and decrypt other remotes.
+The `crypt` remote encrypts and decrypts another remote.
 
-To use `crypt`, first set up the underlying remote. Follow the `rclone
-config` instructions for that remote.
+To use it first set up the underlying remote following the config
+instructions for that remote. You can also use a local pathname
+instead of a remote which will encrypt and decrypt from that directory
+which might be useful for encrypting onto a USB stick for example.
 
-`crypt` applied to a local pathname instead of a remote will
-encrypt and decrypt that directory, and can be used to encrypt USB
-removable drives.
+First check your chosen remote is working - we'll call it
+`remote:path` in these docs. Note that anything inside `remote:path`
+will be encrypted and anything outside won't. This means that if you
+are using a bucket based remote (eg S3, B2, swift) then you should
+probably put the bucket in the remote `s3:bucket`. If you just use
+`s3:` then rclone will make encrypted bucket names too (if using file
+name encryption) which may or may not be what you want.
 
-Before configuring the crypt remote, check the underlying remote is
-working. In this example the underlying remote is called `remote:path`.
-Anything inside `remote:path` will be encrypted and anything outside
-will not. In the case of an S3 based underlying remote (eg Amazon S3,
-B2, Swift) it is generally advisable to define a crypt remote in the
-underlying remote `s3:bucket`. If `s3:` alone is specified alongside
-file name encryption, rclone will encrypt the bucket name.
-
-Configure `crypt` using `rclone config`. In this example the `crypt`
-remote is called `secret`, to differentiate it from the underlying
-`remote`.
+Now configure `crypt` using `rclone config`. We will call this one
+`secret` to differentiate it from the `remote`.
 
 ```
 No remotes found - make a new one
@@ -16253,42 +15959,49 @@ d) Delete this remote
 y/e/d> y
 ```
 
-**Important** The crypt password stored in `rclone.conf` is lightly
-obscured. That only protects it from cursory inspection. It is not
-secure unless encryption of `rclone.conf` is specified.
+**Important** The password is stored in the config file is lightly
+obscured so it isn't immediately obvious what it is. It is in no way
+secure unless you use config file encryption.
 
-A long passphrase is recommended, or `rclone config` can generate a
-random one.
+A long passphrase is recommended, or you can use a random one.
 
-The obscured password is created using AES-CTR with a static key. The
-salt is stored verbatim at the beginning of the obscured password. This
-static key is shared between all versions of rclone.
+The obscured password is created by using AES-CTR with a static key, with
+the salt stored verbatim at the beginning of the obscured password. This
+static key is shared by between all versions of rclone.
 
 If you reconfigure rclone with the same passwords/passphrases
 elsewhere it will be compatible, but the obscured version will be different
 due to the different salt.
 
-Rclone does not encrypt
+Note that rclone does not encrypt
 
 * file length - this can be calculated within 16 bytes
 * modification time - used for syncing
````
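Both sides of that hunk describe the same obscuring mechanism. rclone exposes it programmatically in the `fs/config/obscure` package, so the round trip can be demonstrated directly (a sketch assuming the rclone module is available as a dependency):

```go
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Obscure uses AES-CTR with rclone's static key; the random IV ("salt")
	// is prepended, so two runs give different output for the same input.
	a, err := obscure.Obscure("potato")
	if err != nil {
		log.Fatal(err)
	}
	b, _ := obscure.Obscure("potato")
	fmt.Println(a != b) // true - different salt each time

	// Reveal inverts it, which is why obscuring is not real security.
	plain, err := obscure.Reveal(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(plain) // potato
}
```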
The same hunk continues:

````diff
 
 ## Specifying the remote ##
 
-In normal use, ensure the remote has a `:` in. If specified without,
-rclone uses a local directory of that name. For example if a remote
-`/path/to/secret/files` is specified, rclone encrypts content to that
-directory. If a remote `name` is specified, rclone targets a directory
-`name` in the current directory.
+In normal use, make sure the remote has a `:` in. If you specify the
+remote without a `:` then rclone will use a local directory of that
+name. So if you use a remote of `/path/to/secret/files` then rclone
+will encrypt stuff to that directory. If you use a remote of `name`
+then rclone will put files in a directory called `name` in the current
+directory.
 
-If remote `remote:path/to/dir` is specified, rclone stores encrypted
-files in `path/to/dir` on the remote. With file name encryption, files
-saved to `secret:subdir/subfile` are stored in the unencrypted path
-`path/to/dir` but the `subdir/subpath` element is encrypted.
+If you specify the remote as `remote:path/to/dir` then rclone will
+store encrypted files in `path/to/dir` on the remote. If you are using
+file name encryption, then when you save files to
+`secret:subdir/subfile` this will store them in the unencrypted path
+`path/to/dir` but the `subdir/subpath` bit will be encrypted.
 
 Note that unless you want encrypted bucket names (which are difficult
 to manage because you won't know what directory they represent in web
 interfaces etc), you should probably specify a bucket, eg
 `remote:secretbucket` when using bucket based remotes such as S3,
 Swift, Hubic, B2, GCS.
 
 ## Example ##
 
-Create the following file structure using "standard" file name
+To test I made a little directory of files using "standard" file name
 encryption.
 
 ```
````
````diff
@@ -16302,7 +16015,7 @@ plaintext/
 └── file4.txt
 ```
 
-Copy these to the remote, and list them
+Copy these to the remote and list them back
 
 ```
 $ rclone -q copy plaintext secret:
@@ -16314,7 +16027,7 @@ $ rclone -q ls secret:
         9 subdir/file3.txt
 ```
 
-The crypt remote looks like
+Now see what that looked like when encrypted
 
 ```
 $ rclone -q ls remote:path
@@ -16325,7 +16038,7 @@ $ rclone -q ls remote:path
        56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
 ```
 
-The directory structure is preserved
+Note that this retains the directory structure which means you can do this
 
 ```
 $ rclone -q ls secret:subdir
@@ -16334,9 +16047,9 @@ $ rclone -q ls secret:subdir
        10 subsubdir/file4.txt
 ```
 
-Without file name encryption `.bin` extensions are added to underlying
-names. This prevents the cloud provider attempting to interpret file
-content.
+If don't use file name encryption then the remote will look like this
+- note the `.bin` extensions added to prevent the cloud provider
+attempting to interpret the data.
 
 ```
 $ rclone -q ls remote:path
@@ -16349,6 +16062,8 @@ $ rclone -q ls remote:path
 
 ### File name encryption modes ###
 
+Here are some of the features of the file name encryption modes
+
 Off
 
 * doesn't hide file names or directory structure
@@ -16367,19 +16082,17 @@ Standard
 Obfuscation
 
 This is a simple "rotate" of the filename, with each file having a rot
-distance based on the filename. Rclone stores the distance at the
-beginning of the filename. A file called "hello" may become "53.jgnnq".
+distance based on the filename. We store the distance at the beginning
+of the filename. So a file called "hello" may become "53.jgnnq".
 
-Obfuscation is not a strong encryption of filenames, but hinders
-automated scanning tools picking up on filename patterns. It is an
-intermediate between "off" and "standard" which allows for longer path
-segment names.
+This is not a strong encryption of filenames, but it may stop automated
+scanning tools from picking up on filename patterns. As such it's an
+intermediate between "off" and "standard". The advantage is that it
+allows for longer path segment names.
 
 There is a possibility with some unicode based filenames that the
 obfuscation is weak and may map lower case characters to upper case
-equivalents.
-
-Obfuscation cannot be relied upon for strong protection.
+equivalents. You can not rely on this for strong protection.
 
 * file names very lightly obfuscated
 * file names can be longer than standard encryption
````
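To make the "rotate" description concrete, here is an illustrative toy version of such a scheme, with the rotation distance stored as a numeric prefix (my approximation; rclone's real mapping differs, e.g. its docs show "hello" becoming "53.jgnnq"):

```go
package main

import "fmt"

// obfuscate rotates ASCII letters by a distance derived from the name and
// stores the distance as a prefix. With this toy scheme "hello" becomes
// "20.byffi" (rclone's actual algorithm produces different output).
func obfuscate(name string) string {
	dir := 0
	for _, c := range name {
		dir += int(c)
	}
	dir %= 256
	out := []rune(name)
	for i, c := range out {
		switch {
		case c >= 'a' && c <= 'z':
			out[i] = 'a' + (c-'a'+rune(dir))%26
		case c >= 'A' && c <= 'Z':
			out[i] = 'A' + (c-'A'+rune(dir))%26
		}
	}
	return fmt.Sprintf("%d.%s", dir, string(out))
}

func main() {
	fmt.Println(obfuscate("hello")) // 20.byffi
}
```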
````diff
@@ -16387,14 +16100,13 @@ Obfuscation cannot be relied upon for strong protection.
 * directory structure visible
 * identical files names will have identical uploaded names
 
-Cloud storage systems have limits on file name length and
-total path length which rclone is more likely to breach using
-"Standard" file name encryption. Where file names are less thn 156
-characters in length issues should not be encountered, irrespective of
-cloud storage provider.
+Cloud storage systems have various limits on file name length and
+total path length which you are more likely to hit using "Standard"
+file name encryption. If you keep your file names to below 156
+characters in length then you should be OK on all providers.
 
-An alternative, future rclone file name encryption mode may tolerate
-backend provider path length limits.
+There may be an even more secure file name encryption mode in the
+future which will address the long file name problem.
 
 ### Directory name encryption ###
 Crypt offers the option of encrypting dir names or leaving them intact.
@@ -16420,10 +16132,10 @@ Example:
 Crypt stores modification times using the underlying remote so support
 depends on that.
 
-Hashes are not stored for crypt. However the data integrity is
+Hashes are not stored for crypt.  However the data integrity is
 protected by an extremely strong crypto authenticator.
 
-Use the `rclone cryptcheck` command to check the
+Note that you should use the `rclone cryptcheck` command to check the
 integrity of a crypted remote instead of `rclone check` which can't
 check the checksums properly.
 
@@ -18285,10 +17997,8 @@ Here are the standard options specific to drive (Google Drive).
 
 #### --drive-client-id
 
-Google Application Client Id
-Setting your own is recommended.
-See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
-If you leave this blank, it will use an internal key which is low performance.
+OAuth Client Id
+Leave blank normally.
 
 - Config: client_id
 - Env Var: RCLONE_DRIVE_CLIENT_ID
@@ -19978,13 +19688,8 @@ flag.
 Note that Jottacloud requires the MD5 hash before upload so if the
 source does not have an MD5 checksum then the file will be cached
 temporarily on disk (wherever the `TMPDIR` environment variable points
-to) before it is uploaded. Small files will be cached in memory - see
+to) before it is uploaded.  Small files will be cached in memory - see
 the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
-When uploading from local disk the source checksum is always available,
-so this does not apply. Starting with rclone version 1.52 the same is
-true for crypted remotes (in older versions the crypt backend would not
-calculate hashes for uploads from local disk, so the Jottacloud
-backend had to do it as described above).
 
 #### Restricted filename characters
 
@@ -25727,100 +25432,6 @@ Options:
 
 # Changelog
 
-## v1.53.3 - 2020-11-19
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)
-
-* Bug Fixes
-    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
-        * Passwords you have generated with `rclone config` may be insecure
-        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
-    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
-* VFS
-    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
-* Sharefile
-    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)
-
-## v1.53.2 - 2020-10-26
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
-
-* Bug Fixes
-    * acounting
-        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
-        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
-    * operations
-        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
-        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
-    * build
-        * Work around GitHub actions brew problem (Nick Craig-Wood)
-        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
-* Mount
-    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
-* VFS
-    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
-    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
-    * Fix a race condition in retryFailedResets (Leo Luan)
-    * Fix missed concurrency control between some item operations and reset (Leo Luan)
-    * Add exponential backoff during ENOSPC retries (Leo Luan)
-    * Add a missed update of used cache space (Leo Luan)
-    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
-* Local
-    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
-* Chunker
-    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
-    * Fix upload over crypt (Ivan Andreev)
-* Fichier
-    * Increase maximum file size from 100GB to 300GB (gyutw)
-* Jottacloud
-    * Remove clientSecret from config when upgrading to token based authentication (buengese)
-    * Avoid double url escaping of device/mountpoint (albertony)
-    * Remove DirMove workaround as it's not required anymore - also (buengese)
-* Mailru
-    * Fix uploads after recent changes on server (Ivan Andreev)
-    * Fix range requests after june changes on server (Ivan Andreev)
-    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
-* Onedrive
-    * Fix disk usage for sharepoint (Nick Craig-Wood)
-* S3
-    * Add missing regions for AWS (Anagh Kumar Baranwal)
-* Seafile
-    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
-* SFTP
-    * Always convert the checksum to lower case (buengese)
-* Union
-    * Create root directories if none exist (Nick Craig-Wood)
-
-## v1.53.1 - 2020-09-13
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
-
-* Bug Fixes
-    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
-    * check
-        * Add back missing --download flag (Nick Craig-Wood)
-        * Fix docs (Nick Craig-Wood)
-    * docs
-        * Note --log-file does append (Nick Craig-Wood)
-        * Add full stops for consistency in rclone --help (edwardxml)
-        * Add Tencent COS to s3 provider list (wjielai)
-        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
-        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
-        * Fix formatting of rc docs page (Nick Craig-Wood)
-    * build
-        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
-        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
-        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
-* VFS
-    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
-    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
-* Local
-    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
-* Drive
-    * Re-adds special oauth help text (Tim Gallant)
-* Opendrive
-    * Do not retry 400 errors (Evan Harris)
-
 ## v1.53.0 - 2020-09-02
 
 [See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
 
````
**MANUAL.txt** (generated, 16789 changed lines)

File diff suppressed because it is too large
````diff
@@ -296,8 +296,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		ServerSideAcrossConfigs: true,
 	}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
 
-	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
-
 	return f, err
 }
 
@@ -960,8 +958,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 	}
 	info := f.wrapInfo(src, chunkRemote, size)
 
-	// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
-	c.chunkLimit = c.chunkSize
 	// TODO: handle range/limit options
 	chunk, errChunk := basePut(ctx, wrapIn, info, options...)
 	if errChunk != nil {
@@ -1170,14 +1166,10 @@ func (c *chunkingReader) updateHashes() {
 func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
 	if c.chunkLimit <= 0 {
 		// Chunk complete - switch to next one.
-		// Note #1:
-		// We might not get here because some remotes (eg. box multi-uploader)
-		// read the specified size exactly and skip the concluding EOF Read.
-		// Then a check in the put loop will kick in.
-		// Note #2:
-		// The crypt backend after receiving EOF here will call Read again
-		// and we must insist on returning EOF, so we postpone refilling
-		// chunkLimit to the main loop.
+		c.chunkLimit = c.chunkSize
 		return 0, io.EOF
 	}
 	if int64(len(buf)) > c.chunkLimit {
````
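The comment block removed above documents an `io.Reader` subtlety: a reader that signals end-of-chunk with `(0, io.EOF)` should keep returning `io.EOF` on subsequent calls, because wrappers such as the crypt backend probe again after EOF. A standalone sketch of that contract (illustrative, not the chunker code):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// chunkReader serves at most limit bytes from r, then reports io.EOF.
// Crucially, once exhausted it keeps returning io.EOF on every call,
// which callers that probe for EOF more than once rely on.
type chunkReader struct {
	r     io.Reader
	limit int64
}

func (c *chunkReader) Read(p []byte) (int, error) {
	if c.limit <= 0 {
		return 0, io.EOF // stable EOF: repeated calls stay at EOF
	}
	if int64(len(p)) > c.limit {
		p = p[:c.limit]
	}
	n, err := c.r.Read(p)
	c.limit -= int64(n)
	return n, err
}

func main() {
	cr := &chunkReader{r: strings.NewReader("0123456789"), limit: 4}
	b, _ := io.ReadAll(cr)
	fmt.Printf("%q\n", b) // "0123"
	_, err := cr.Read(make([]byte, 1))
	fmt.Println(err) // EOF
}
```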
````diff
@@ -2302,9 +2302,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		// preserve the description on copy for docs
 		info, err := f.getFile(actualID(srcObj.id), "description")
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to read description for Google Doc")
+			fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
+		} else {
+			createInfo.Description = info.Description
 		}
-		createInfo.Description = info.Description
 	} else {
 		// don't overwrite the description on copy for files
 		// this should work for docs but it doesn't - it is probably a bug in Google Drive
````
````diff
@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 			Help:     "Impersonate this user when using a business account.",
 			Default:  "",
 			Advanced: true,
 		}, {
+			Name: "shared_files",
+			Help: `Instructs rclone to work on individual shared files.
+
+In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
+operations and read operations (e.g. downloading) are supported in this mode.
+All other operations will be disabled.`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name: "shared_folders",
+			Help: `Instructs rclone to work on shared folders.
+
+When this flag is used with no path only the List operation is supported and
+all available shared folders will be listed. If you specify a path the first part
+will be interpreted as the name of shared folder. Rclone will then try to mount this
+shared to the root namespace. On success shared folder rclone proceeds normally.
+The shared folder is now pretty much a normal folder and all normal operations
+are supported.
+
+Note that we don't unmount the shared folder afterwards so the
+--dropbox-shared-folders can be omitted after the first use of a particular
+shared folder.`,
+			Default:  false,
+			Advanced: true,
+		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 
 // Options defines the configuration for this backend
 type Options struct {
-	ChunkSize   fs.SizeSuffix        `config:"chunk_size"`
-	Impersonate string               `config:"impersonate"`
-	Enc         encoder.MultiEncoder `config:"encoding"`
+	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
+	Impersonate   string               `config:"impersonate"`
+	SharedFiles   bool                 `config:"shared_files"`
+	SharedFolders bool                 `config:"shared_folders"`
+	Enc           encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote dropbox server
@@ -186,7 +213,9 @@ type Fs struct {
 //
 // Dropbox Objects always have full metadata
 type Object struct {
-	fs      *Fs       // what this object is part of
+	fs     *Fs // what this object is part of
+	id     string
+	url    string
 	remote  string    // The remote path
 	bytes   int64     // size of the object
 	modTime time.Time // time it was last modified
````
````diff
@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		CaseInsensitive:         true,
 		ReadMimeType:            true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
-	f.setRoot(root)
+	})
+
+	// do not fill features yet
+	if f.opt.SharedFiles {
+		f.setRoot(root)
+		if f.root == "" {
+			return f, nil
+		}
+		_, err := f.findSharedFile(f.root)
+		f.root = ""
+		if err == nil {
+			return f, fs.ErrorIsFile
+		}
+		return f, nil
+	}
+
+	if f.opt.SharedFolders {
+		f.setRoot(root)
+		if f.root == "" {
+			return f, nil // our root it empty so we probably want to list shared folders
+		}
+
+		dir := path.Dir(f.root)
+		if dir == "." {
+			dir = f.root
+		}
+
+		// root is not empty so we have find the right shared folder if it exists
+		id, err := f.findSharedFolder(dir)
+		if err != nil {
+			// if we didn't find the specified shared folder we have to bail out here
+			return nil, err
+		}
+		// we found the specified shared folder so let's mount it
+		// this will add it to the users normal root namespace and allows us
+		// to actually perform operations on it using the normal api endpoints.
+		err = f.mountSharedFolder(id)
+		if err != nil {
+			switch e := err.(type) {
+			case sharing.MountFolderAPIError:
+				if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
+					return nil, err
+				}
+			default:
+				return nil, err
+			}
+			// if the moint failed we have to abort here
+		}
+		// if the mount succeeded it's now a normal folder in the users root namespace
+		// we disable shared folder mode and proceed normally
+		f.opt.SharedFolders = false
+	}
+
+	f.features.Fill(f)
 
 	// If root starts with / then use the actual root
 	if strings.HasPrefix(root, "/") {
@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		fs.Debugf(f, "Using root namespace %q", f.ns)
 	}
+	f.setRoot(root)
 
 	// See if the root is actually an object
 	_, err = f.getFileMetadata(f.slashRoot)
````
````diff
@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	if f.opt.SharedFiles {
+		return f.findSharedFile(remote)
+	}
 	return f.newObjectWithInfo(remote, nil)
 }
 
+// listSharedFoldersApi lists all available shared folders mounted and not mounted
+// we'll need the id later so we have to return them in original format
+func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
+	started := false
+	var res *sharing.ListFoldersResult
+	for {
+		if !started {
+			arg := sharing.ListFoldersArgs{
+				Limit: 100,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListFolders(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, err
+			}
+			started = true
+		} else {
+			arg := sharing.ListFoldersContinueArg{
+				Cursor: res.Cursor,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListFoldersContinue(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, errors.Wrap(err, "list continue")
+			}
+		}
+		for _, entry := range res.Entries {
+			leaf := f.opt.Enc.ToStandardName(entry.Name)
+			d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
+			entries = append(entries, d)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if res.Cursor == "" {
+			break
+		}
+	}
+
+	return entries, nil
+}
+
+// findSharedFolder find the id for a given shared folder name
+// somewhat annoyingly there is no endpoint to query a shared folder by it's name
+// so our only option is to iterate over all shared folders
+func (f *Fs) findSharedFolder(name string) (id string, err error) {
+	entries, err := f.listSharedFolders()
+	if err != nil {
+		return "", err
+	}
+	for _, entry := range entries {
+		if entry.(*fs.Dir).Remote() == name {
+			return entry.(*fs.Dir).ID(), nil
+		}
+	}
+	return "", fs.ErrorDirNotFound
+}
+
+// mountSharedFolders mount a shared folder to the root namespace
+func (f *Fs) mountSharedFolder(id string) error {
+	arg := sharing.MountFolderArg{
+		SharedFolderId: id,
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		_, err := f.sharing.MountFolder(&arg)
+		return shouldRetry(err)
+	})
+	return err
+}
+
+// listSharedFolders lists shared the user as access to (note this means individual
+// files not files contained in shared folders)
+func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
+	started := false
+	var res *sharing.ListFilesResult
+	for {
+		if !started {
+			arg := sharing.ListFilesArg{
+				Limit: 100,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListReceivedFiles(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, err
+			}
+			started = true
+		} else {
+			arg := sharing.ListFilesContinueArg{
+				Cursor: res.Cursor,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListReceivedFilesContinue(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, errors.Wrap(err, "list continue")
+			}
+		}
+		for _, entry := range res.Entries {
+			fmt.Printf("%+v\n", entry)
+			entryPath := entry.Name
+			o := &Object{
+				fs:      f,
+				url:     entry.PreviewUrl,
+				remote:  entryPath,
+				modTime: entry.TimeInvited,
+			}
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, o)
+		}
+		if res.Cursor == "" {
+			break
+		}
+	}
+	return entries, nil
+}
+
+func (f *Fs) findSharedFile(name string) (o *Object, err error) {
+	files, err := f.listReceivedFiles()
+	if err != nil {
+		return nil, err
+	}
+	for _, entry := range files {
+		if entry.(*Object).remote == name {
+			return entry.(*Object), nil
+		}
+	}
+	return nil, fs.ErrorObjectNotFound
+}
+
 // List the objects and directories in dir into entries. The
 // entries can be returned in any order but should be for a
 // complete directory.
````
@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
if f.opt.SharedFiles {
|
||||
return f.listReceivedFiles()
|
||||
}
|
||||
if f.opt.SharedFolders {
|
||||
return f.listSharedFolders()
|
||||
}
|
||||
|
||||
root := f.slashRoot
|
||||
if dir != "" {
|
||||
root += "/" + dir
|
||||
@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
|
||||
remote := path.Join(dir, leaf)
|
||||
if folderInfo != nil {
|
||||
d := fs.NewDir(remote, time.Now())
|
||||
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
|
||||
entries = append(entries, d)
|
||||
} else if fileInfo != nil {
|
||||
o, err := f.newObjectWithInfo(remote, fileInfo)
|
||||
@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if f.opt.SharedFiles || f.opt.SharedFolders {
|
||||
return nil, fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
if f.opt.SharedFiles || f.opt.SharedFolders {
|
||||
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
root := path.Join(f.slashRoot, dir)
|
||||
|
||||
// can't create or run metadata on root
|
||||
@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
if f.opt.SharedFiles || f.opt.SharedFolders {
|
||||
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
return f.purgeCheck(ctx, dir, true)
|
||||
}
|
||||
|
||||
@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ID returns the object id
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Hash returns the dropbox special hash
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
|
||||
return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
if t != DbHashType {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -948,6 +1195,7 @@ func (o *Object) Size() int64 {
|
||||
//
|
||||
// This isn't a complete set of metadata and has an inacurate date
|
||||
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
|
||||
o.id = info.Id
|
||||
o.bytes = int64(info.Size)
|
||||
o.modTime = info.ClientModified
|
||||
o.hash = info.ContentHash
|
||||
@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.SharedFiles {
|
||||
if len(options) != 0 {
|
||||
return nil, errors.New("OpenOptions not supported for shared files")
|
||||
}
|
||||
arg := sharing.GetSharedLinkMetadataArg{
|
||||
Url: o.url,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
headers := fs.OpenOptionHeaders(options)
|
||||
arg := files.DownloadArg{
|
||||
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
|
||||
Path: o.id,
|
||||
ExtraHeaders: headers,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
|
||||
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
|
||||
@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
|
||||
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
|
||||
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
|
||||
@@ -1201,4 +1472,5 @@ var (
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
if size > int64(100e9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles

@@ -373,9 +373,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()

m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")

token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -387,6 +384,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -553,7 +551,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}

// readMetaDataForPath reads the metadata from the path
@@ -1089,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
retry, _ := shouldRetry(resp, err)
return (retry && resp.StatusCode != 500), err
})
if err != nil {
return nil, err
@@ -1193,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

// surprise! jottacloud's dirmove is broken - the api spits out an error but
// the dir gets moved regardless
if apiErr, ok := err.(*api.Error); ok {
if apiErr.StatusCode == 500 {
_, err := f.NewObject(ctx, dstRemote)
if err == fs.ErrorNotAFile {
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
return nil
}
return err
}
}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}

@@ -1231,15 +1231,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
if runtime.GOOS == "windows" && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}

// Stat an Object into info

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"

@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {

// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}

// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}

// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime uint64 `json:"mtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,6 +159,71 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"

"github.com/pkg/errors"
@@ -656,14 +655,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -677,7 +671,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
@@ -1867,30 +1861,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}

opts := rest.Opts{
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}

var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
token, err := f.accessToken()
if err != nil {
closeBody(res)
return "", err
}

f.shardURL = url
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
}

var info api.ShardInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return "", err
}

f.shardURL = info.Body.Upload[0].URL
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)

@@ -2122,18 +2116,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

start, end, partialRequest := getTransferRange(o.size, options...)

headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
start, end, partial := getTransferRange(o.size, options...)

// TODO: set custom timeouts
opts := rest.Opts{
@@ -2144,7 +2127,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: headers,
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
}

var res *http.Response
@@ -2165,36 +2151,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206

var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
var hasher gohash.Hash
if !partial {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream = &endHandler{
wrapStream := &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}


@@ -1247,10 +1247,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use

@@ -122,9 +122,6 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
@@ -134,12 +131,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -149,15 +146,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -173,36 +164,15 @@ func init() {
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
Help: "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
}},
}, {
Name: "region",
@@ -615,12 +585,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
@@ -630,15 +600,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -653,37 +617,16 @@ func init() {
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul) Region.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local) Region.",
Help: "Asia Pacific (Seoul)",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai) Region.",
Help: "Asia Pacific (Mumbai)",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region.",
Help: "Asia Pacific (Hong Kong)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region.",
}},
}, {
Name: "location_constraint",

@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int64 `json:"size"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(library.Size)
d.SetSize(int64(library.Size))
entries = append(entries, d)
}


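The `Size int64` change above is the fix behind the "accessing libraries > 2GB on 32 bit systems" changelog entry: Go's `int` is only 32 bits on 32-bit platforms, so library sizes over 2 GiB wrap negative. A standalone sketch of the failure mode (illustrative, not rclone code):

```go
package main

import "fmt"

func main() {
	// A 3 GiB library size as reported by the Seafile API.
	var size int64 = 3 << 30

	// On a 32-bit platform (GOARCH=386 or arm), int is 32 bits, so a
	// conversion to it wraps around; int32 stands in for that here.
	truncated := int32(size)

	fmt.Println(size)      // 3221225472
	fmt.Println(truncated) // -1073741824 - why the field must be int64
}
```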
@@ -1087,7 +1087,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}

// Parses the byte array output from the SSH session

@@ -106,7 +106,7 @@ type UploadSpecification struct {
type UploadFinishResponse struct {
Error bool `json:"error"`
ErrorMessage string `json:"errorMessage"`
ErrorCode int `json:"errorCode,string"`
ErrorCode int `json:"errorCode"`
Value []struct {
UploadID string `json:"uploadid"`
ParentID string `json:"parentid"`
@@ -114,7 +114,7 @@ type UploadFinishResponse struct {
StreamID string `json:"streamid"`
FileName string `json:"filename"`
DisplayName string `json:"displayname"`
Size int `json:"size,string"`
Size int `json:"size"`
Md5 string `json:"md5"`
} `json:"value"`
}

@@ -3,6 +3,7 @@ package policy
import (
"context"
"math/rand"
"time"

"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
@@ -19,10 +20,12 @@ type EpRand struct {
}

func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
rand.Seed(time.Now().Unix())
return upstreams[rand.Intn(len(upstreams))]
}

func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
rand.Seed(time.Now().Unix())
return entries[rand.Intn(len(entries))]
}


@@ -145,16 +145,11 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
@@ -823,7 +818,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,

@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"path"
@@ -34,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -512,9 +512,7 @@ func AddBackendFlags() {

// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
if err := random.Seed(); err != nil {
log.Fatalf("Fatal error: %v", err)
}
rand.Seed(time.Now().Unix())
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {

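For context on the `random.Seed()` line above: the v1.53.3 side seeds `math/rand` once with a crypto-strong value, while this branch still seeds from the guessable `time.Now().Unix()` (the CVE-2020-28924 follow-up in the changelog). A minimal sketch of that technique, assuming nothing about rclone's `lib/random` beyond the changelog description:

```go
package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"math/rand"
)

// seedMathRand seeds math/rand once with 8 bytes from crypto/rand,
// instead of the wall clock, so the sequence is not predictable.
func seedMathRand() error {
	var buf [8]byte
	if _, err := crand.Read(buf[:]); err != nil {
		return err // no usable entropy source
	}
	rand.Seed(int64(binary.LittleEndian.Uint64(buf[:])))
	return nil
}

func main() {
	if err := seedMathRand(); err != nil {
		panic(err)
	}
	fmt.Println(rand.Intn(100))
}
```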
@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
modTime := node.ModTime()
// set attributes
vfs := node.VFS()
attr.Owner.Gid = vfs.Opt.GID
attr.Owner.Uid = vfs.Opt.UID
attr.Owner.Gid = vfs.Opt.UID
attr.Owner.Uid = vfs.Opt.GID
attr.Mode = getMode(node)
attr.Size = Size
attr.Nlink = 1

@@ -336,6 +336,9 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -409,3 +409,4 @@ put them back in again.` >}}
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
* WarpedPixel <WarpedPixel@users.noreply.github.com>
* Sam Edwards <sam@samedwards.ca>
* wjielai <gouki0123@gmail.com>

@@ -404,7 +404,6 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

Box file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `\` (U+FF3C Fullwidth
Reverse Solidus).
and from an identical looking unicode equivalent `\`.

Box only supports filenames up to 255 characters in length.

@@ -5,100 +5,6 @@ description: "Rclone Changelog"

# Changelog

## v1.53.3 - 2020-11-19

[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)

* Bug Fixes
    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
        * Passwords you have generated with `rclone config` may be insecure
        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
* VFS
    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
* Sharefile
    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)

## v1.53.2 - 2020-10-26

[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)

* Bug Fixes
    * accounting
        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
    * operations
        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
    * build
        * Work around GitHub actions brew problem (Nick Craig-Wood)
        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
    * Fix a race condition in retryFailedResets (Leo Luan)
    * Fix missed concurrency control between some item operations and reset (Leo Luan)
    * Add exponential backoff during ENOSPC retries (Leo Luan)
    * Add a missed update of used cache space (Leo Luan)
    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
    * Fix upload over crypt (Ivan Andreev)
* Fichier
    * Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
    * Remove clientSecret from config when upgrading to token based authentication (buengese)
    * Avoid double url escaping of device/mountpoint (albertony)
    * Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
    * Fix uploads after recent changes on server (Ivan Andreev)
    * Fix range requests after June changes on server (Ivan Andreev)
    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
    * Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
    * Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
    * Always convert the checksum to lower case (buengese)
* Union
    * Create root directories if none exist (Nick Craig-Wood)

## v1.53.1 - 2020-09-13

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)

* Bug Fixes
    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
    * check
        * Add back missing --download flag (Nick Craig-Wood)
        * Fix docs (Nick Craig-Wood)
    * docs
        * Note --log-file does append (Nick Craig-Wood)
        * Add full stops for consistency in rclone --help (edwardxml)
        * Add Tencent COS to s3 provider list (wjielai)
        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
        * Fix formatting of rc docs page (Nick Craig-Wood)
    * build
        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
    * Re-adds special oauth help text (Tim Gallant)
* Opendrive
    * Do not retry 400 errors (Evan Harris)

## v1.53.0 - 2020-09-02

[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)

@@ -39,10 +39,10 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
* [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
* [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
* [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
* [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
* [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.
@@ -65,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone move](/commands/rclone_move/) - Move files from source to dest.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file
* [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
* [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
* [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.

@@ -29,7 +29,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are
@@ -55,7 +55,6 @@ rclone check source:path dest:path [flags]
```
--combined string Make a combined report of changes to this file
--differ string Report all non-matching files to this file
--download Check by downloading rather than with hash.
--error string Report all files with errors (hashing or reading) to this file
-h, --help help for check
--match string Report all matching files to this file

@@ -1,13 +1,13 @@
---
title: "rclone cleanup"
description: "Clean up the remote if possible."
description: "Clean up the remote if possible"
slug: rclone_cleanup
url: /commands/rclone_cleanup/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
---
# rclone cleanup

Clean up the remote if possible.
Clean up the remote if possible

## Synopsis


@@ -1,13 +1,13 @@
---
title: "rclone copy"
description: "Copy files from source to dest, skipping already copied."
description: "Copy files from source to dest, skipping already copied"
slug: rclone_copy
url: /commands/rclone_copy/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
---
# rclone copy

Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied

## Synopsis


@@ -1,13 +1,13 @@
---
title: "rclone copyto"
description: "Copy files from source to dest, skipping already copied."
description: "Copy files from source to dest, skipping already copied"
slug: rclone_copyto
url: /commands/rclone_copyto/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
---
# rclone copyto

Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied

## Synopsis


@@ -40,7 +40,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are

@@ -1,13 +1,13 @@
---
title: "rclone lsf"
description: "List directories and objects in remote:path formatted for parsing."
description: "List directories and objects in remote:path formatted for parsing"
slug: rclone_lsf
url: /commands/rclone_lsf/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
---
# rclone lsf

List directories and objects in remote:path formatted for parsing.
List directories and objects in remote:path formatted for parsing

## Synopsis


@@ -49,9 +49,6 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount

**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.

## Installing on Windows

To run rclone mount on Windows, you will need to
@@ -194,6 +191,9 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.

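The chunk doubling described above is easy to write down. A small sketch (not rclone's actual VFS code) that reproduces the documented 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M sequence for --vfs-read-chunk-size 100M with --vfs-read-chunk-size-limit 500M:

```go
package main

import "fmt"

const M = 1 << 20

func main() {
	chunkSize := int64(100 * M) // --vfs-read-chunk-size
	limit := int64(500 * M)     // --vfs-read-chunk-size-limit (0 = keep doubling)

	var offset int64
	size := chunkSize
	for i := 0; i < 5; i++ {
		fmt.Printf("%dM-%dM\n", offset/M, (offset+size)/M)
		offset += size
		// Double the chunk size after each chunk, capped at the limit.
		size *= 2
		if limit > 0 && size > limit {
			size = limit
		}
	}
}
```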
## VFS - Virtual File System

This command uses the VFS layer. This adapts the cloud storage objects
@@ -357,11 +357,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -1,13 +1,13 @@
---
title: "rclone obscure"
description: "Obscure password for use in the rclone config file."
description: "Obscure password for use in the rclone config file"
slug: rclone_obscure
url: /commands/rclone_obscure/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
---
# rclone obscure

Obscure password for use in the rclone config file.
Obscure password for use in the rclone config file

## Synopsis


@@ -196,11 +196,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -195,11 +195,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -267,11 +267,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -206,11 +206,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -275,11 +275,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -6,26 +6,23 @@ description: "Encryption overlay remote"
{{< icon "fa fa-lock" >}}Crypt
----------------------------------------

Rclone `crypt` remotes encrypt and decrypt other remotes.
The `crypt` remote encrypts and decrypts another remote.

To use `crypt`, first set up the underlying remote. Follow the `rclone
config` instructions for that remote.
To use it first set up the underlying remote following the config
instructions for that remote. You can also use a local pathname
instead of a remote which will encrypt and decrypt from that directory
which might be useful for encrypting onto a USB stick for example.

`crypt` applied to a local pathname instead of a remote will
encrypt and decrypt that directory, and can be used to encrypt USB
removable drives.
First check your chosen remote is working - we'll call it
`remote:path` in these docs. Note that anything inside `remote:path`
will be encrypted and anything outside won't. This means that if you
are using a bucket based remote (eg S3, B2, swift) then you should
probably put the bucket in the remote `s3:bucket`. If you just use
`s3:` then rclone will make encrypted bucket names too (if using file
name encryption) which may or may not be what you want.

Before configuring the crypt remote, check the underlying remote is
working. In this example the underlying remote is called `remote:path`.
Anything inside `remote:path` will be encrypted and anything outside
will not. In the case of an S3 based underlying remote (eg Amazon S3,
B2, Swift) it is generally advisable to define a crypt remote in the
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
file name encryption, rclone will encrypt the bucket name.

Configure `crypt` using `rclone config`. In this example the `crypt`
remote is called `secret`, to differentiate it from the underlying
`remote`.
Now configure `crypt` using `rclone config`. We will call this one
`secret` to differentiate it from the `remote`.

```
No remotes found - make a new one
@@ -99,42 +96,49 @@ d) Delete this remote
y/e/d> y
```

**Important** The crypt password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless encryption of `rclone.conf` is specified.
**Important** The password is stored in the config file is lightly
obscured so it isn't immediately obvious what it is. It is in no way
secure unless you use config file encryption.

A long passphrase is recommended, or `rclone config` can generate a
random one.
A long passphrase is recommended, or you can use a random one.

The obscured password is created using AES-CTR with a static key. The
salt is stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.
The obscured password is created by using AES-CTR with a static key, with
the salt stored verbatim at the beginning of the obscured password. This
static key is shared by between all versions of rclone.

If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.
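A sketch of the scheme the rewritten paragraph describes - AES-CTR with a static key and the random IV ("salt") stored in front of the ciphertext. This is an illustrative toy with a made-up key, not rclone's actual `lib/obscure` source:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"fmt"
)

// A static key like this is shared by every copy of the program, which is
// why obscuring is reversible by design and not real security.
var staticKey = []byte("0123456789abcdef") // hypothetical 16-byte key

func obscure(password string) (string, error) {
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	// The random IV is stored verbatim at the start of the output.
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize]
	if _, err := rand.Read(iv); err != nil {
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func reveal(obscured string) (string, error) {
	buf, err := base64.RawURLEncoding.DecodeString(obscured)
	if err != nil {
		return "", err
	}
	if len(buf) < aes.BlockSize {
		return "", errors.New("input too short to contain an IV")
	}
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	out := make([]byte, len(buf)-aes.BlockSize)
	cipher.NewCTR(block, buf[:aes.BlockSize]).XORKeyStream(out, buf[aes.BlockSize:])
	return string(out), nil
}

func main() {
	o, _ := obscure("hunter2")
	p, _ := reveal(o)
	fmt.Println(o, p) // o differs every run (random IV), p is always "hunter2"
}
```

The random IV is also why the docs note that re-entering the same passphrase elsewhere yields a different obscured string that is nevertheless compatible.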

Rclone does not encrypt
Note that rclone does not encrypt

* file length - this can be calculated within 16 bytes
* modification time - used for syncing

## Specifying the remote ##

In normal use, ensure the remote has a `:` in. If specified without,
rclone uses a local directory of that name. For example if a remote
`/path/to/secret/files` is specified, rclone encrypts content to that
directory. If a remote `name` is specified, rclone targets a directory
`name` in the current directory.
In normal use, make sure the remote has a `:` in. If you specify the
remote without a `:` then rclone will use a local directory of that
name. So if you use a remote of `/path/to/secret/files` then rclone
will encrypt stuff to that directory. If you use a remote of `name`
then rclone will put files in a directory called `name` in the current
directory.

If remote `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subpath` element is encrypted.
If you specify the remote as `remote:path/to/dir` then rclone will
store encrypted files in `path/to/dir` on the remote. If you are using
file name encryption, then when you save files to
`secret:subdir/subfile` this will store them in the unencrypted path
`path/to/dir` but the `subdir/subpath` bit will be encrypted.

Note that unless you want encrypted bucket names (which are difficult
to manage because you won't know what directory they represent in web
interfaces etc), you should probably specify a bucket, eg
`remote:secretbucket` when using bucket based remotes such as S3,
Swift, Hubic, B2, GCS.

## Example ##

Create the following file structure using "standard" file name
To test I made a little directory of files using "standard" file name
encryption.

```
@@ -148,7 +152,7 @@ plaintext/
└── file4.txt
```

Copy these to the remote, and list them
Copy these to the remote and list them back

```
$ rclone -q copy plaintext secret:
@@ -160,7 +164,7 @@ $ rclone -q ls secret:
9 subdir/file3.txt
```

The crypt remote looks like
Now see what that looked like when encrypted

```
$ rclone -q ls remote:path
@@ -171,7 +175,7 @@ $ rclone -q ls remote:path
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
```

The directory structure is preserved
Note that this retains the directory structure which means you can do this

```
$ rclone -q ls secret:subdir
@@ -180,9 +184,9 @@ $ rclone -q ls secret:subdir
10 subsubdir/file4.txt
```

Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.
If you don't use file name encryption then the remote will look like this
- note the `.bin` extensions added to prevent the cloud provider
attempting to interpret the data.

```
$ rclone -q ls remote:path
@@ -195,6 +199,8 @@ $ rclone -q ls remote:path

### File name encryption modes ###

Here are some of the features of the file name encryption modes

Off

* doesn't hide file names or directory structure
@@ -213,19 +219,17 @@ Standard
Obfuscation

This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".
distance based on the filename. We store the distance at the beginning
of the filename. So a file called "hello" may become "53.jgnnq".
|
||||
Obfuscation is not a strong encryption of filenames, but hinders
|
||||
automated scanning tools picking up on filename patterns. It is an
|
||||
intermediate between "off" and "standard" which allows for longer path
|
||||
segment names.
|
||||
This is not a strong encryption of filenames, but it may stop automated
|
||||
scanning tools from picking up on filename patterns. As such it's an
|
||||
intermediate between "off" and "standard". The advantage is that it
|
||||
allows for longer path segment names.
|
||||
|
||||
There is a possibility with some unicode based filenames that the
|
||||
obfuscation is weak and may map lower case characters to upper case
|
||||
equivalents.
|
||||
|
||||
Obfuscation cannot be relied upon for strong protection.
|
||||
equivalents. You can not rely on this for strong protection.
|
||||
|
||||
* file names very lightly obfuscated
|
||||
* file names can be longer than standard encryption
|
||||
@@ -233,14 +237,13 @@ Obfuscation cannot be relied upon for strong protection.
|
||||
* directory structure visible
|
||||
* identical files names will have identical uploaded names
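To make the rotate scheme concrete, here is a toy Go sketch of the idea. It is not rclone's actual obfuscation cipher (the real one handles unicode ranges and per-directory-segment rotation); the helper names and the modulo-26 distance are invented purely for illustration.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// rot shifts ASCII letters by n positions, wrapping within a-z / A-Z.
func rot(s string, n int) string {
	var b strings.Builder
	for _, r := range s {
		switch {
		case r >= 'a' && r <= 'z':
			r = 'a' + (r-'a'+rune(n))%26
		case r >= 'A' && r <= 'Z':
			r = 'A' + (r-'A'+rune(n))%26
		}
		b.WriteRune(r)
	}
	return b.String()
}

// obfuscate derives a per-name rot distance from the rune sum and
// stores it as a numeric prefix, mirroring the "NN.name" shape above.
func obfuscate(name string) string {
	dist := 0
	for _, r := range name {
		dist += int(r)
	}
	dist %= 26
	return strconv.Itoa(dist) + "." + rot(name, dist)
}

// deobfuscate reads the stored distance back and rotates the other way.
func deobfuscate(obfuscated string) (string, error) {
	i := strings.IndexByte(obfuscated, '.')
	if i < 0 {
		return "", fmt.Errorf("no distance prefix in %q", obfuscated)
	}
	dist, err := strconv.Atoi(obfuscated[:i])
	if err != nil {
		return "", err
	}
	return rot(obfuscated[i+1:], 26-dist%26), nil
}

func main() {
	o := obfuscate("hello")
	p, _ := deobfuscate(o)
	fmt.Println(o, p) // "12.tqxxa hello" under this toy scheme, not rclone's
}
```

Because the distance is derived from the name itself, identical names always obfuscate to identical stored names, which is exactly the trade-off the feature list above describes.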

Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less than 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.
Cloud storage systems have various limits on file name length and
total path length which you are more likely to hit using "Standard"
file name encryption. If you keep your file names to below 156
characters in length then you should be OK on all providers.

An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.
There may be an even more secure file name encryption mode in the
future which will address the long file name problem.

### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -266,10 +269,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.

Hashes are not stored for crypt. However the data integrity is
Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.

Use the `rclone cryptcheck` command to check the
Note that you should use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.


@@ -1253,17 +1253,11 @@ or with `--backup-dir`. See `--backup-dir` for more info.

For example

    rclone copy -i /path/to/local/file remote:current --suffix .bak
    rclone sync -i /path/to/local/file remote:current --suffix .bak

will copy `/path/to/local` to `remote:current`, but for any files
will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted have .bak added.

If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.

    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"

### --suffix-keep-extension ###

When using `--suffix`, setting this causes rclone put the SUFFIX

@@ -547,10 +547,8 @@ Here are the standard options specific to drive (Google Drive).

#### --drive-client-id

Google Application Client Id
Setting your own is recommended.
See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
If you leave this blank, it will use an internal key which is low performance.
OAuth Client Id
Leave blank normally.

- Config: client_id
- Env Var: RCLONE_DRIVE_CLIENT_ID

@@ -202,6 +202,39 @@ Impersonate this user when using a business account.
- Type: string
- Default: ""

#### --dropbox-shared-files

Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.

- Config: shared_files
- Env Var: RCLONE_DROPBOX_SHARED_FILES
- Type: bool
- Default: false

#### --dropbox-shared-folders

Instructs rclone to work on shared folders.

When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of shared folder. Rclone will then try to mount this
shared to the root namespace. On success shared folder rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.

- Config: shared_folders
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
- Type: bool
- Default: false

#### --dropbox-encoding

This sets the encoding for the backend.

@@ -147,7 +147,7 @@ These flags are available for every command.
      --use-json-log   Use json log format.
      --use-mmap   Use mmap allocator (see docs).
      --use-server-modtime   Use server modified time instead of object metadata
      --user-agent string   Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.3")
      --user-agent string   Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
  -v, --verbose count   Print lots more stuff (repeat for more)
```

@@ -246,7 +246,7 @@ and may be set in the config file.
      --drive-auth-owner-only   Only consider files owned by the authenticated user.
      --drive-auth-url string   Auth server URL.
      --drive-chunk-size SizeSuffix   Upload chunk size. Must a power of 2 >= 256k. (default 8M)
      --drive-client-id string   Google Application Client Id
      --drive-client-id string   OAuth Client Id
      --drive-client-secret string   OAuth Client Secret
      --drive-disable-http2   Disable drive using http2 (default true)
      --drive-encoding MultiEncoder   This sets the encoding for the backend. (default InvalidUtf8)

@@ -139,7 +139,7 @@ Choose a number from below, or type in your own value
   / Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
   \ "ap-south-1"
   / Asia Pacific (Hong Kong) Region
   / Asia Patific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
   \ "ap-east-1"
   / South America (Sao Paulo) Region
@@ -456,7 +456,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
### Standard Options

Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).

#### --s3-provider

@@ -487,10 +487,10 @@ Choose your S3 provider.
        - Scaleway Object Storage
    - "StackPath"
        - StackPath Object Storage
    - "TencentCOS"
        - Tencent Cloud Object Storage (COS)
    - "Wasabi"
        - Wasabi Object Storage
    - "TencentCOS"
        - Tencent Cloud Object Storage (COS)
    - "Other"
        - Any other S3 compatible provider

@@ -545,12 +545,12 @@ Region to connect to.
    - "us-east-2"
        - US East (Ohio) Region
        - Needs location constraint us-east-2.
    - "us-west-1"
        - US West (Northern California) Region
        - Needs location constraint us-west-1.
    - "us-west-2"
        - US West (Oregon) Region
        - Needs location constraint us-west-2.
    - "us-west-1"
        - US West (Northern California) Region
        - Needs location constraint us-west-1.
    - "ca-central-1"
        - Canada (Central) Region
        - Needs location constraint ca-central-1.
@@ -560,15 +560,9 @@ Region to connect to.
    - "eu-west-2"
        - EU (London) Region
        - Needs location constraint eu-west-2.
    - "eu-west-3"
        - EU (Paris) Region
        - Needs location constraint eu-west-3.
    - "eu-north-1"
        - EU (Stockholm) Region
        - Needs location constraint eu-north-1.
    - "eu-south-1"
        - EU (Milan) Region
        - Needs location constraint eu-south-1.
    - "eu-central-1"
        - EU (Frankfurt) Region
        - Needs location constraint eu-central-1.
@@ -584,36 +578,15 @@ Region to connect to.
    - "ap-northeast-2"
        - Asia Pacific (Seoul)
        - Needs location constraint ap-northeast-2.
    - "ap-northeast-3"
        - Asia Pacific (Osaka-Local)
        - Needs location constraint ap-northeast-3.
    - "ap-south-1"
        - Asia Pacific (Mumbai)
        - Needs location constraint ap-south-1.
    - "ap-east-1"
        - Asia Pacific (Hong Kong) Region
        - Asia Patific (Hong Kong) Region
        - Needs location constraint ap-east-1.
    - "sa-east-1"
        - South America (Sao Paulo) Region
        - Needs location constraint sa-east-1.
    - "me-south-1"
        - Middle East (Bahrain) Region
        - Needs location constraint me-south-1.
    - "af-south-1"
        - Africa (Cape Town) Region
        - Needs location constraint af-south-1.
    - "cn-north-1"
        - China (Beijing) Region
        - Needs location constraint cn-north-1.
    - "cn-northwest-1"
        - China (Ningxia) Region
        - Needs location constraint cn-northwest-1.
    - "us-gov-east-1"
        - AWS GovCloud (US-East) Region
        - Needs location constraint us-gov-east-1.
    - "us-gov-west-1"
        - AWS GovCloud (US) Region
        - Needs location constraint us-gov-west-1.

#### --s3-region

@@ -869,54 +842,6 @@ Endpoint for StackPath Object Storage.

#### --s3-endpoint

Endpoint for Tencent COS API.

- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
- Type: string
- Default: ""
- Examples:
    - "cos.ap-beijing.myqcloud.com"
        - Beijing Region.
    - "cos.ap-nanjing.myqcloud.com"
        - Nanjing Region.
    - "cos.ap-shanghai.myqcloud.com"
        - Shanghai Region.
    - "cos.ap-guangzhou.myqcloud.com"
        - Guangzhou Region.
    - "cos.ap-nanjing.myqcloud.com"
        - Nanjing Region.
    - "cos.ap-chengdu.myqcloud.com"
        - Chengdu Region.
    - "cos.ap-chongqing.myqcloud.com"
        - Chongqing Region.
    - "cos.ap-hongkong.myqcloud.com"
        - Hong Kong (China) Region.
    - "cos.ap-singapore.myqcloud.com"
        - Singapore Region.
    - "cos.ap-mumbai.myqcloud.com"
        - Mumbai Region.
    - "cos.ap-seoul.myqcloud.com"
        - Seoul Region.
    - "cos.ap-bangkok.myqcloud.com"
        - Bangkok Region.
    - "cos.ap-tokyo.myqcloud.com"
        - Tokyo Region.
    - "cos.na-siliconvalley.myqcloud.com"
        - Silicon Valley Region.
    - "cos.na-ashburn.myqcloud.com"
        - Virginia Region.
    - "cos.na-toronto.myqcloud.com"
        - Toronto Region.
    - "cos.eu-frankfurt.myqcloud.com"
        - Frankfurt Region.
    - "cos.eu-moscow.myqcloud.com"
        - Moscow Region.
    - "cos.accelerate.myqcloud.com"
        - Use Tencent COS Accelerate Endpoint.

#### --s3-endpoint

Endpoint for S3 API.
Required when using an S3 clone.

@@ -954,22 +879,18 @@ Used when creating buckets only.
        - Empty for US Region, Northern Virginia or Pacific Northwest.
    - "us-east-2"
        - US East (Ohio) Region.
    - "us-west-1"
        - US West (Northern California) Region.
    - "us-west-2"
        - US West (Oregon) Region.
    - "us-west-1"
        - US West (Northern California) Region.
    - "ca-central-1"
        - Canada (Central) Region.
    - "eu-west-1"
        - EU (Ireland) Region.
    - "eu-west-2"
        - EU (London) Region.
    - "eu-west-3"
        - EU (Paris) Region.
    - "eu-north-1"
        - EU (Stockholm) Region.
    - "eu-south-1"
        - EU (Milan) Region.
    - "EU"
        - EU Region.
    - "ap-southeast-1"
@@ -979,27 +900,13 @@ Used when creating buckets only.
    - "ap-northeast-1"
        - Asia Pacific (Tokyo) Region.
    - "ap-northeast-2"
        - Asia Pacific (Seoul) Region.
    - "ap-northeast-3"
        - Asia Pacific (Osaka-Local) Region.
        - Asia Pacific (Seoul)
    - "ap-south-1"
        - Asia Pacific (Mumbai) Region.
        - Asia Pacific (Mumbai)
    - "ap-east-1"
        - Asia Pacific (Hong Kong) Region.
        - Asia Pacific (Hong Kong)
    - "sa-east-1"
        - South America (Sao Paulo) Region.
    - "me-south-1"
        - Middle East (Bahrain) Region.
    - "af-south-1"
        - Africa (Cape Town) Region.
    - "cn-north-1"
        - China (Beijing) Region
    - "cn-northwest-1"
        - China (Ningxia) Region.
    - "us-gov-east-1"
        - AWS GovCloud (US-East) Region.
    - "us-gov-west-1"
        - AWS GovCloud (US) Region.

#### --s3-location-constraint

@@ -1102,8 +1009,6 @@ doesn't copy the ACL from the source but rather writes a fresh one.
- Type: string
- Default: ""
- Examples:
    - "default"
        - Owner gets Full_CONTROL. No one else has access rights (default).
    - "private"
        - Owner gets FULL_CONTROL. No one else has access rights (default).
    - "public-read"
@@ -1204,24 +1109,6 @@ The storage class to use when storing new objects in OSS.

#### --s3-storage-class

The storage class to use when storing new objects in Tencent COS.

- Config: storage_class
- Env Var: RCLONE_S3_STORAGE_CLASS
- Type: string
- Default: ""
- Examples:
    - ""
        - Default
    - "STANDARD"
        - Standard storage class
    - "ARCHIVE"
        - Archive storage mode.
    - "STANDARD_IA"
        - Infrequent access storage mode.

#### --s3-storage-class

The storage class to use when storing new objects in S3.

- Config: storage_class
@@ -1459,7 +1346,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.


@@ -1 +1 @@
v1.53.3
v1.54.0

@@ -26,6 +26,10 @@ var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)

// Account limits and accounts for one transfer
type Account struct {
	stats *StatsInfo

@@ -366,8 +366,6 @@ func (sg *statsGroups) sum() *StatsInfo {
			sum.lastError = stats.lastError
		}
		sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
		sum.oldDuration += stats.oldDuration
		sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
	}
	stats.mu.RUnlock()
}

@@ -4,10 +4,8 @@ import (
	"fmt"
	"runtime"
	"testing"
	"time"

	"github.com/rclone/rclone/fstest/testy"
	"github.com/stretchr/testify/assert"
)

func TestStatsGroupOperations(t *testing.T) {
@@ -45,26 +43,17 @@ func TestStatsGroupOperations(t *testing.T) {
		t.Parallel()
		stats1 := NewStats()
		stats1.bytes = 5
		stats1.errors = 6
		stats1.oldDuration = time.Second
		stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
		stats1.errors = 5
		stats2 := NewStats()
		stats2.bytes = 10
		stats2.errors = 12
		stats2.oldDuration = 2 * time.Second
		stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
		sg := newStatsGroups()
		sg.set("test1", stats1)
		sg.set("test2", stats2)
		sum := sg.sum()
		assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
		assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
		assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
		// dict can iterate in either order
		a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
		b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
		if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
			assert.Equal(t, b, sum.oldTimeRanges)
		if sum.bytes != stats1.bytes+stats2.bytes {
			t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
		}
		if sum.errors != stats1.errors+stats2.errors {
			t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
		}
	})
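The a/b pair in the test above handles Go's unspecified map iteration order: the sum is built by ranging over a map, so the appended slices may arrive in either order. A minimal standalone sketch of that trick (invented data, not rclone's types):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build a "sum" by ranging over a map, as sg.sum() does above.
	groups := map[string][]int{"test1": {1}, "test2": {2}}
	var got []int
	for _, v := range groups {
		got = append(got, v...)
	}
	// Map iteration order is unspecified, so accept both orderings,
	// mirroring the a/b assertion in the test.
	a, b := []int{1, 2}, []int{2, 1}
	ok := reflect.DeepEqual(got, a) || reflect.DeepEqual(got, b)
	fmt.Println("order-insensitive match:", ok)
}
```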

@@ -72,16 +72,8 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
	for _, tr := range tm.items {
		s = append(s, tr)
	}
	// sort by time first and if equal by name. Note that the relatively
	// low time resolution on Windows can cause equal times.
	sort.Slice(s, func(i, j int) bool {
		a, b := s[i], s[j]
		if a.startedAt.Before(b.startedAt) {
			return true
		} else if !a.startedAt.Equal(b.startedAt) {
			return false
		}
		return a.remote < b.remote
		return s[i].startedAt.Before(s[j].startedAt)
	})
	return s
}

@@ -172,12 +172,9 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
		return false
	}
	if ht == hash.None {
		common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
		if common.Count() == 0 {
			checksumWarning.Do(func() {
				fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
			})
		}
		checksumWarning.Do(func() {
			fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
		})
		fs.Debugf(src, "Size of src and dst objects identical")
	} else {
		fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
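The `checksumWarning.Do` call in the hunk above relies on `sync.Once`, so the fallback warning is logged at most once per process no matter how many objects are compared. A minimal sketch of that pattern (names invented):

```go
package main

import (
	"log"
	"sync"
)

var checksumWarning sync.Once

// compare logs the fallback warning only on its first qualifying call.
func compare(srcHasHash, dstHasHash bool) {
	if !srcHasHash || !dstHasHash {
		checksumWarning.Do(func() {
			log.Println("--checksum in use but no common hashes; falling back to --size-only")
		})
	}
}

func main() {
	for i := 0; i < 3; i++ {
		compare(false, true) // warning printed only on the first iteration
	}
}
```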
@@ -366,7 +363,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
	actionTaken = "Copied (server side copy)"
	if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
		(fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
		return nil, accounting.ErrorMaxTransferLimitReachedFatal
		return nil, accounting.ErrorMaxTransferLimitReachedGraceful
	}
	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
		in := tr.Account(ctx, nil) // account the transfer

@@ -1525,11 +1522,12 @@ func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err
				}
			}
		}
	} else if fs.Config.Suffix != "" {
	} else {
		if srcFileName == "" {
			return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
		}
		// --backup-dir is not set but --suffix is - use the destination as the backupDir
		backupDir = fdst
	} else {
		return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
	}
	if !CanServerSideMove(backupDir) {
		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
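The branch structure above reduces to three outcomes. A hedged standalone sketch, with an invented helper name, of how `--backup-dir` and `--suffix` interact:

```go
package main

import (
	"errors"
	"fmt"
)

// chooseBackupDir is an invented helper; it condenses the BackupDir
// branches above into their three outcomes.
func chooseBackupDir(backupDir, suffix, srcFileName string) (string, error) {
	switch {
	case backupDir != "":
		return backupDir, nil // explicit --backup-dir wins
	case suffix != "":
		if srcFileName == "" {
			return "", errors.New("--suffix must be used with a file or with --backup-dir")
		}
		return "<destination>", nil // --suffix only: back up into the destination itself
	default:
		return "", errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty")
	}
}

func main() {
	dir, err := chooseBackupDir("", ".bak", "file.txt")
	fmt.Println(dir, err) // "<destination>" <nil>
}
```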

@@ -1440,7 +1440,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
	err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file3.Path, file3.Path)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "Max transfer limit reached")
	assert.True(t, fserrors.IsFatalError(err))
	assert.True(t, fserrors.IsNoRetryError(err))
	fstest.CheckItems(t, r.Flocal, file1, file2, file3, file4)
	fstest.CheckItems(t, r.Fremote, file1)

@@ -59,17 +59,30 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
		return errors.New("Web GUI path exists, but is a file instead of folder. Please check the path " + extractPath)
	}

	// Get the latest release details
	WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
	if err != nil {
		return errors.Wrap(err, "Error checking for web gui release update, skipping update")
	}
	dat, err := ioutil.ReadFile(tagPath)
	tagsMatch := false
	if err != nil {
		fs.Errorf(nil, "Error reading tag file at %s ", tagPath)
		checkUpdate = true
	} else if string(dat) == tag {
		tagsMatch = true
	}
	fs.Debugf(nil, "Current tag: %s, Release tag: %s", string(dat), tag)

	if !tagsMatch {
		fs.Infof(nil, "A release (%s) for gui is present at %s. Use --rc-web-gui-update to update. Your current version is (%s)", tag, WebUIURL, string(dat))
	}

	// if the old file exists does not exist or forced update is enforced.
	// TODO: Add hashing to check integrity of the previous update.
	if !extractPathExist || checkUpdate || forceUpdate {
		// Get the latest release details
		WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
		if err != nil {
			return err
		}

		dat, err := ioutil.ReadFile(tagPath)
		if err == nil && string(dat) == tag {
		if tagsMatch {
			fs.Logf(nil, "No update to Web GUI available.")
			if !forceUpdate {
				return nil
@@ -91,7 +104,7 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
		return errors.New("Web GUI path is a file instead of folder. Please check it " + extractPath)
	}

	fs.Logf(nil, "A new release for gui is present at "+WebUIURL)
	fs.Logf(nil, "A new release for gui (%s) is present at %s", tag, WebUIURL)
	fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path : %s]\n", strconv.Itoa(size), zipPath)

	// download the zip from latest url
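The refactor above boils down to: compare the locally recorded release tag with the latest tag before deciding to download. A simplified sketch with an invented `needsUpdate` helper (not rclone's API):

```go
package main

import (
	"fmt"
	"io/ioutil"
)

// needsUpdate condenses the tag comparison above: a missing or
// unreadable tag file forces an update check, otherwise the recorded
// tag must match the latest release tag.
func needsUpdate(tagPath, latestTag string, force bool) bool {
	dat, err := ioutil.ReadFile(tagPath)
	if err != nil {
		return true // no recorded tag: treat the GUI as out of date
	}
	return force || string(dat) != latestTag
}

func main() {
	fmt.Println(needsUpdate("/nonexistent/tag", "v2.0.1", false)) // true
}
```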

@@ -32,6 +32,8 @@ type syncCopyMove struct {
	// internal state
	ctx                    context.Context // internal context for controlling go-routines
	cancel                 func()          // cancel the context
	inCtx                  context.Context // internal context for controlling march
	inCancel               func()          // cancel the march context
	noTraverse             bool // if set don't traverse the dst
	noCheckDest            bool // if set transfer all objects regardless without checking dst
	noUnicodeNormalization bool // don't normalize unicode characters in filenames
@@ -144,6 +146,8 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
	} else {
		s.ctx, s.cancel = context.WithCancel(ctx)
	}
	// Input context - cancel this for graceful stop
	s.inCtx, s.inCancel = context.WithCancel(s.ctx)
	if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
		fs.Errorf(nil, "Ignoring --no-traverse with sync")
		s.noTraverse = false
@@ -248,6 +252,12 @@ func (s *syncCopyMove) processError(err error) {
	}
	if err == context.DeadlineExceeded {
		err = fserrors.NoRetryError(err)
	} else if err == accounting.ErrorMaxTransferLimitReachedGraceful {
		if s.inCtx.Err() == nil {
			fs.Logf(nil, "%v - stopping transfers", err)
			// Cancel the march and stop the pipes
			s.inCancel()
		}
	}
	s.errorMu.Lock()
	defer s.errorMu.Unlock()
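Taken together, the graceful-stop changes follow a common pattern: a sentinel error marked no-retry, plus an inner context that is cancelled to stop feeding new work while in-flight transfers drain. A self-contained, simplified sketch of the idea (all names invented, not rclone's types):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
)

var errMaxTransfer = errors.New("max transfer limit reached")

type syncer struct {
	inCtx    context.Context
	inCancel context.CancelFunc
	moved    int64
	limit    int64
}

// copyOne accounts the transfer and reports a graceful (not fatal) error
// once the limit is exceeded.
func (s *syncer) copyOne(size int64) error {
	if atomic.AddInt64(&s.moved, size) > s.limit {
		return errMaxTransfer
	}
	return nil
}

// processError cancels the inner context exactly once, so producers stop
// while workers already running are left to finish.
func (s *syncer) processError(err error) {
	if errors.Is(err, errMaxTransfer) && s.inCtx.Err() == nil {
		fmt.Println(err, "- stopping transfers")
		s.inCancel()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := &syncer{inCtx: ctx, inCancel: cancel, limit: 4096}
	for _, size := range []int64{2048, 2048, 2048} {
		if err := s.copyOne(size); err != nil {
			s.processError(err)
			break
		}
	}
	fmt.Println("input context cancelled:", ctx.Err() != nil)
}
```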

@@ -287,7 +297,7 @@ func (s *syncCopyMove) currentError() error {
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pair, ok := in.GetMax(s.ctx, fraction)
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
@@ -343,7 +353,7 @@ func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.W
	defer wg.Done()
	for {
		pair, ok := in.GetMax(s.ctx, fraction)
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
@@ -363,7 +373,7 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
	defer wg.Done()
	var err error
	for {
		pair, ok := in.GetMax(s.ctx, fraction)
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			return
		}
@@ -809,7 +819,7 @@ func (s *syncCopyMove) run() error {

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:  s.ctx,
		Ctx:  s.inCtx,
		Fdst: s.fdst,
		Fsrc: s.fsrc,
		Dir:  s.dir,

@@ -1590,7 +1590,7 @@ func TestSyncCopyDest(t *testing.T) {
}

// Test with BackupDir set
func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
	r := fstest.NewRun(t)
	defer r.Finalise()

@@ -1599,23 +1599,7 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
	}
	r.Mkdir(context.Background(), r.Fremote)

	if backupDir != "" {
		fs.Config.BackupDir = r.FremoteName + "/" + backupDir
		backupDir += "/"
	} else {
		fs.Config.BackupDir = ""
		backupDir = "dst/"
		// Exclude the suffix from the sync otherwise the sync
		// deletes the old backup files
		flt, err := filter.NewFilter(nil)
		require.NoError(t, err)
		require.NoError(t, flt.AddRule("- *"+suffix))
		oldFlt := filter.Active
		filter.Active = flt
		defer func() {
			filter.Active = oldFlt
		}()
	}
	fs.Config.BackupDir = r.FremoteName + "/backup"
	fs.Config.Suffix = suffix
	fs.Config.SuffixKeepExtension = suffixKeepExtension
	defer func() {
@@ -1643,14 +1627,14 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
	require.NoError(t, err)

	// one should be moved to the backup dir and the new one installed
	file1.Path = backupDir + "one" + suffix
	file1.Path = "backup/one" + suffix
	file1a.Path = "dst/one"
	// two should be unchanged
	// three should be moved to the backup dir
	if suffixKeepExtension {
		file3.Path = backupDir + "three" + suffix + ".txt"
		file3.Path = "backup/three" + suffix + ".txt"
	} else {
		file3.Path = backupDir + "three.txt" + suffix
		file3.Path = "backup/three.txt" + suffix
	}

	fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)
@@ -1668,29 +1652,22 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
	require.NoError(t, err)

	// one should be moved to the backup dir and the new one installed
	file1a.Path = backupDir + "one" + suffix
	file1a.Path = "backup/one" + suffix
	file1b.Path = "dst/one"
	// two should be unchanged
	// three should be moved to the backup dir
	if suffixKeepExtension {
		file3a.Path = backupDir + "three" + suffix + ".txt"
		file3a.Path = "backup/three" + suffix + ".txt"
	} else {
		file3a.Path = backupDir + "three.txt" + suffix
		file3a.Path = "backup/three.txt" + suffix
	}

	fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) {
	testSyncBackupDir(t, "backup", "", false)
}
func TestSyncBackupDirWithSuffix(t *testing.T) {
	testSyncBackupDir(t, "backup", ".bak", false)
}
func TestSyncBackupDir(t *testing.T)           { testSyncBackupDir(t, "", false) }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
	testSyncBackupDir(t, "backup", "-2019-01-01", true)
}
func TestSyncBackupDirSuffixOnly(t *testing.T) {
	testSyncBackupDir(t, "", ".bak", false)
	testSyncBackupDir(t, "-2019-01-01", true)
}

// Test with Suffix set
@@ -1874,38 +1851,51 @@ func TestSyncIgnoreCase(t *testing.T) {
	fstest.CheckItems(t, r.Fremote, file2)
}

// Test that aborting on max upload works
func TestAbort(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	if r.Fremote.Name() != "local" {
		t.Skip("This test only runs on local")
	}

// Test that aborting on --max-transfer works
func TestMaxTransfer(t *testing.T) {
	oldMaxTransfer := fs.Config.MaxTransfer
	oldTransfers := fs.Config.Transfers
	oldCheckers := fs.Config.Checkers
	oldCutoff := fs.Config.CutoffMode
	fs.Config.MaxTransfer = 3 * 1024
	fs.Config.Transfers = 1
	fs.Config.Checkers = 1
	fs.Config.CutoffMode = fs.CutoffModeHard
	defer func() {
		fs.Config.MaxTransfer = oldMaxTransfer
		fs.Config.Transfers = oldTransfers
		fs.Config.Checkers = oldCheckers
		fs.Config.CutoffMode = oldCutoff
	}()

	// Create file on source
	file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
	file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
	file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
	fstest.CheckItems(t, r.Flocal, file1, file2, file3)
	fstest.CheckItems(t, r.Fremote)
	test := func(t *testing.T, cutoff fs.CutoffMode) {
		r := fstest.NewRun(t)
		defer r.Finalise()
		fs.Config.CutoffMode = cutoff

	accounting.GlobalStats().ResetCounters()
		if r.Fremote.Name() != "local" {
			t.Skip("This test only runs on local")
		}

	err := Sync(context.Background(), r.Fremote, r.Flocal, false)
	expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
	fserrors.Count(expectedErr)
	assert.Equal(t, expectedErr, err)
		// Create file on source
		file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
		file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
		file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
		fstest.CheckItems(t, r.Fremote)

		accounting.GlobalStats().ResetCounters()

		err := Sync(context.Background(), r.Fremote, r.Flocal, false)
		expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
		if cutoff != fs.CutoffModeHard {
			expectedErr = accounting.ErrorMaxTransferLimitReachedGraceful
		}
		fserrors.Count(expectedErr)
		assert.Equal(t, expectedErr, err)
	}

	t.Run("Hard", func(t *testing.T) { test(t, fs.CutoffModeHard) })
	t.Run("Soft", func(t *testing.T) { test(t, fs.CutoffModeSoft) })
	t.Run("Cautious", func(t *testing.T) { test(t, fs.CutoffModeCautious) })
}

@@ -1,4 +1,4 @@
package fs

// Version of rclone
var Version = "v1.53.3-DEV"
var Version = "v1.54.0-DEV"

@@ -27,16 +27,15 @@ type Test struct {
//
// FIXME make bucket based remotes set sub-dir automatically???
type Backend struct {
	Backend     string   // name of the backend directory
	Remote      string   // name of the test remote
	FastList    bool     // set to test with -fast-list
	Short       bool     // set to test with -short
	OneOnly     bool     // set to run only one backend test at once
	MaxFile     string   // file size limit
	CleanUp     bool     // when running clean, run cleanup first
	Ignore      []string // test names to ignore the failure of
	Tests       []string // paths of tests to run, blank for all
	ListRetries int      // -list-retries if > 0
	Backend  string   // name of the backend directory
	Remote   string   // name of the test remote
	FastList bool     // set to test with -fast-list
	Short    bool     // set to test with -short
	OneOnly  bool     // set to run only one backend test at once
	MaxFile  string   // file size limit
	CleanUp  bool     // when running clean, run cleanup first
	Ignore   []string // test names to ignore the failure of
	Tests    []string // paths of tests to run, blank for all
}

// includeTest returns true if this backend should be included in this
@@ -80,17 +79,16 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
			continue
		}
		run := &Run{
			Remote:      b.Remote,
			Backend:     b.Backend,
			Path:        t.Path,
			FastList:    fastlist,
			Short:       (b.Short && t.Short),
			NoRetries:   t.NoRetries,
			OneOnly:     b.OneOnly,
			NoBinary:    t.NoBinary,
			SizeLimit:   int64(maxSize),
			Ignore:      ignore,
			ListRetries: b.ListRetries,
			Remote:    b.Remote,
			Backend:   b.Backend,
			Path:      t.Path,
			FastList:  fastlist,
			Short:     (b.Short && t.Short),
			NoRetries: t.NoRetries,
			OneOnly:   b.OneOnly,
			NoBinary:  t.NoBinary,
			SizeLimit: int64(maxSize),
			Ignore:    ignore,
		}
		if t.AddBackend {
			run.Path = path.Join(run.Path, b.Backend)

@@ -20,7 +20,6 @@ backends:
  - backend: "b2"
    remote: "TestB2:"
    fastlist: true
    listretries: 5
  - backend: "crypt"
    remote: "TestCryptDrive:"
    fastlist: true
@@ -42,13 +41,15 @@ backends:
    remote: "TestChunkerChunk3bNometaLocal:"
    fastlist: true
    maxfile: 6k
  - backend: "chunker"
    remote: "TestChunkerMailru:"
    fastlist: true
  - backend: "chunker"
    remote: "TestChunkerChunk50bMailru:"
    fastlist: true
    maxfile: 10k
  # Disable chunker with mailru tests until Mailru is fixed - see
  # https://github.com/rclone/rclone/issues/4376
  # - backend: "chunker"
  #   remote: "TestChunkerMailru:"
  #   fastlist: true
  # - backend: "chunker"
  #   remote: "TestChunkerChunk50bMailru:"
  #   fastlist: true
  #   maxfile: 10k
  - backend: "chunker"
    remote: "TestChunkerChunk50bYandex:"
    fastlist: true
@@ -72,10 +73,6 @@ backends:
    remote: "TestChunkerChunk50bSHA1HashS3:"
    fastlist: true
    maxfile: 1k
  - backend: "chunker"
    remote: "TestChunkerOverCrypt:"
    fastlist: true
    maxfile: 6k
  - backend: "chunker"
    remote: "TestChunkerChunk50bMD5QuickS3:"
    fastlist: true
@@ -147,12 +144,12 @@ backends:
  #   ignore:
  #     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
  #     - TestIntegration/FsMkdir/FsPutFiles/SetTier
  # - backend: "s3"
  #   remote: "TestS3Ceph:"
  #   fastlist: true
  #   ignore:
  #     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
  #     - TestIntegration/FsMkdir/FsPutFiles/SetTier
  - backend: "s3"
    remote: "TestS3Ceph:"
    fastlist: true
    ignore:
      - TestIntegration/FsMkdir/FsPutFiles/FsCopy
      - TestIntegration/FsMkdir/FsPutFiles/SetTier
  - backend: "s3"
    remote: "TestS3Alibaba:"
    fastlist: true
@@ -173,11 +170,11 @@ backends:
  - backend: "swift"
    remote: "TestSwift:"
    fastlist: true
  # - backend: "swift"
  #   remote: "TestSwiftCeph:"
  #   fastlist: true
  #   ignore:
  #     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
  - backend: "swift"
    remote: "TestSwiftCeph:"
    fastlist: true
    ignore:
      - TestIntegration/FsMkdir/FsPutFiles/FsCopy
  - backend: "yandex"
    remote: "TestYandex:"
    fastlist: false

@@ -35,17 +35,16 @@ var (
// if retries are needed.
type Run struct {
	// Config
	Remote      string // name of the test remote
	Backend     string // name of the backend
	Path        string // path to the source directory
	FastList    bool   // add -fast-list to tests
	Short       bool   // add -short
	NoRetries   bool   // don't retry if set
	OneOnly     bool   // only run test for this backend at once
	NoBinary    bool   // set to not build a binary
	SizeLimit   int64  // maximum test file size
	Ignore      map[string]struct{}
	ListRetries int // -list-retries if > 0
	Remote    string // name of the test remote
	Backend   string // name of the backend
	Path      string // path to the source directory
	FastList  bool   // add -fast-list to tests
	Short     bool   // add -short
	NoRetries bool   // don't retry if set
	OneOnly   bool   // only run test for this backend at once
	NoBinary  bool   // set to not build a binary
	SizeLimit int64  // maximum test file size
	Ignore    map[string]struct{}
	// Internals
	CmdLine   []string
	CmdString string
@@ -337,12 +336,8 @@ func (r *Run) Init() {
		r.CmdLine = []string{"./" + r.BinaryName()}
	}
	r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
	listRetries := *listRetries
	if r.ListRetries > 0 {
		listRetries = r.ListRetries
	}
	if listRetries > 0 {
		r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
	if *listRetries > 0 {
		r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(*listRetries))
	}
	r.Try = 1
	if *verbose {

go.mod
@@ -72,3 +72,5 @@ require (
	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
	storj.io/uplink v1.2.0
)

replace github.com/jlaffaye/ftp => github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96

go.sum
@@ -238,6 +238,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e h1:itZyHiOkiB8mIGouegRNLM9LttGQ3yrgRmp/J/6H/0g=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jlaffaye/ftp v0.0.0-20200812143550-39e3779af0db h1:e30IC+OuZIeMVK33/zE7wDvxDaRmGuRt/ps67pzcxAw=
github.com/jlaffaye/ftp v0.0.0-20200812143550-39e3779af0db/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -306,6 +308,16 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/ncw/ftp v0.0.0-20200910202626-83bf7f3051fe h1:vJHusE940z05SKcdSy67YvPeZi9DkV7k1awGFKhCwXs=
github.com/ncw/ftp v0.0.0-20200910202626-83bf7f3051fe/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200911161945-13c89676f4e4 h1:eLu30+/U5nLCkFegOUsGTFJ7wojikvdNESGEmA3ZnuM=
github.com/ncw/ftp v0.0.0-20200911161945-13c89676f4e4/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200912103546-f95f53b8ef3e h1:UUzL92dVtNe6OQfRo+xxubN4Mj+chJro87Qa0bM+3Kg=
github.com/ncw/ftp v0.0.0-20200912103546-f95f53b8ef3e/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200913095915-9bc640dd2108 h1:SBBlbVBRqSAb4KBfpQkevTEAmWOsX8exrX3XxNY/QOQ=
github.com/ncw/ftp v0.0.0-20200913095915-9bc640dd2108/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96 h1:DIvfe9Wqr8cuJulz8+guLCGyZRO22MDRjCk4fTvJ+7U=
github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU=

@@ -2,10 +2,8 @@
package random

import (
	cryptorand "crypto/rand"
	"encoding/base64"
	"encoding/binary"
	mathrand "math/rand"
	"math/rand"

	"github.com/pkg/errors"
)
@@ -25,7 +23,7 @@ func String(n int) string {
	for i := range out {
		source := pattern[p]
		p = (p + 1) % len(pattern)
		out[i] = source[mathrand.Intn(len(source))]
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}
@@ -43,7 +41,7 @@ func Password(bits int) (password string, err error) {
		bytes++
	}
	var pw = make([]byte, bytes)
	n, err := cryptorand.Read(pw)
	n, err := rand.Read(pw)
	if err != nil {
		return "", errors.Wrap(err, "password read failed")
	}
@@ -53,19 +51,3 @@ func Password(bits int) (password string, err error) {
	password = base64.RawURLEncoding.EncodeToString(pw)
	return password, nil
}

// Seed the global math/rand with crypto strong data
//
// This doesn't make it OK to use math/rand in crypto sensitive
// environments - don't do that! However it does help to mitigate the
// problem if that happens accidentally. This would have helped with
// CVE-2020-28924 - #4783
func Seed() error {
	var seed int64
	err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed)
	if err != nil {
		return errors.Wrap(err, "failed to read random seed")
	}
	mathrand.Seed(seed)
	return nil
}
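Assuming the package keeps its usual `lib/random` import path in the rclone module, calling `Seed` once at start-up might look like this (a sketch, not rclone's actual call site):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/rclone/rclone/lib/random"
)

func main() {
	// Seed the global math/rand source from crypto/rand once at start-up,
	// mitigating accidental use of the deterministic default sequence.
	if err := random.Seed(); err != nil {
		panic(err)
	}
	fmt.Println(rand.Int63())
}
```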

@@ -1,7 +1,6 @@
package random

import (
	"math/rand"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -49,16 +48,3 @@ func TestPasswordDuplicates(t *testing.T) {
		seen[s] = true
	}
}

func TestSeed(t *testing.T) {
	// seed 100 times and check the first random number doesn't repeat
	// This test could fail with a probability of ~ 10**-15
	const n = 100
	var seen = map[int64]bool{}
	for i := 0; i < n; i++ {
		assert.NoError(t, Seed())
		first := rand.Int63()
		assert.False(t, seen[first])
		seen[first] = true
	}
}

@@ -342,14 +342,9 @@ func (f *File) Size() int64 {
}

// SetModTime sets the modtime for the file
//
// if NoModTime is set then it does nothing
func (f *File) SetModTime(modTime time.Time) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.d.vfs.Opt.NoModTime {
		return nil
	}
	if f.d.vfs.Opt.ReadOnly {
		return EROFS
	}

@@ -23,8 +23,6 @@ must be supplied.`
//
// If "fs" is not set and there is one and only one VFS in the active
// cache then it returns it. This is for backwards compatibility.
//
// This deletes the "fs" parameter from in if it is valid
func getVFS(in rc.Params) (vfs *VFS, err error) {
	fsString, err := in.GetString("fs")
	if rc.IsErrParamNotFound(err) {
@@ -48,7 +46,6 @@ func getVFS(in rc.Params) (vfs *VFS, err error) {
	} else if len(activeVFS) > 1 {
		return nil, errors.Errorf("more than one VFS active with name %q", fsString)
	}
	delete(in, "fs") // delete the fs parameter
	return activeVFS[0], nil
}

@@ -57,7 +57,6 @@ func TestRcGetVFS(t *testing.T) {
	assert.Contains(t, err.Error(), "more than one VFS active - need")
	assert.Nil(t, vfs)

	inPresent = rc.Params{"fs": fs.ConfigString(r.Fremote)}
	vfs, err = getVFS(inPresent)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "more than one VFS active with name")
@@ -68,8 +67,7 @@ func TestRcForget(t *testing.T) {
	r, vfs, cleanup, call := rcNewRun(t, "vfs/forget")
	defer cleanup()
	_, _ = r, vfs
	in := rc.Params{"fs": fs.ConfigString(r.Fremote)}
	out, err := call.Fn(context.Background(), in)
	out, err := call.Fn(context.Background(), nil)
	require.NoError(t, err)
	assert.Equal(t, rc.Params{
		"forgotten": []string{},
@@ -81,8 +79,7 @@ func TestRcRefresh(t *testing.T) {
	r, vfs, cleanup, call := rcNewRun(t, "vfs/refresh")
	defer cleanup()
	_, _ = r, vfs
	in := rc.Params{"fs": fs.ConfigString(r.Fremote)}
	out, err := call.Fn(context.Background(), in)
	out, err := call.Fn(context.Background(), nil)
	require.NoError(t, err)
	assert.Equal(t, rc.Params{
		"result": map[string]string{

@@ -466,15 +466,9 @@ func (c *Cache) retryFailedResets() {
	if len(c.errItems) != 0 {
		fs.Debugf(nil, "vfs cache reset: before redoing reset errItems = %v", c.errItems)
		for itemName := range c.errItems {
			if retryItem, ok := c.item[itemName]; ok {
				_, _, err := retryItem.Reset()
				if err == nil || !fserrors.IsErrNoSpace(err) {
					// TODO: not trying to handle non-ENOSPC errors yet
					delete(c.errItems, itemName)
				}
			} else {
				// The retry item was deleted because it was closed.
				// No need to redo the failed reset now.
			_, _, err := c.item[itemName].Reset()
			if err == nil || !fserrors.IsErrNoSpace(err) {
				// TODO: not trying to handle non-ENOSPC errors yet
				delete(c.errItems, itemName)
			}
		}
@@ -609,7 +603,7 @@ func (c *Cache) clean(removeCleanFiles bool) {
	if os.IsNotExist(err) {
		return
	}
	c.updateUsed()

	c.mu.Lock()
	oldItems, oldUsed := len(c.item), fs.SizeSuffix(c.used)
	c.mu.Unlock()

@@ -230,11 +230,7 @@ func (dls *Downloaders) Close(inErr error) (err error) {
		}
	}
	dls.cancel()
	// dls may have entered the periodical (every 5 seconds) kickWaiters() call
	// unlock the mutex to allow it to finish so that we can get its dls.wg.Done()
	dls.mu.Unlock()
	dls.wg.Wait()
	dls.mu.Lock()
	dls.dls = nil
	dls._dispatchWaiters()
	dls._closeWaiters(inErr)
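The unlock/wait/relock dance above avoids a deadlock with the periodic kickWaiters goroutine, which also takes the mutex before it can observe the cancelled context and call `wg.Done()`. A standalone toy reproduction of the pattern (simplified, invented names):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type downloaders struct {
	mu     sync.Mutex
	wg     sync.WaitGroup
	ctx    context.Context
	cancel context.CancelFunc
}

// kickWaiters periodically takes the mutex, like the real 5s ticker.
func (d *downloaders) kickWaiters() {
	d.wg.Add(1)
	go func() {
		defer d.wg.Done()
		ticker := time.NewTicker(5 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-d.ctx.Done():
				return
			case <-ticker.C:
				d.mu.Lock() // would deadlock Close if it waited with mu held
				d.mu.Unlock()
			}
		}
	}()
}

func (d *downloaders) Close() {
	d.mu.Lock()
	d.cancel()
	// Release the mutex so kickWaiters can finish and call wg.Done().
	d.mu.Unlock()
	d.wg.Wait()
	d.mu.Lock()
	defer d.mu.Unlock()
	fmt.Println("closed cleanly")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	d := &downloaders{ctx: ctx, cancel: cancel}
	d.kickWaiters()
	time.Sleep(20 * time.Millisecond)
	d.Close()
}
```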

@@ -43,13 +43,6 @@ import (
// be taken before Item.mu. writeback may call into Item but Item may
// **not** call writeback methods with Item.mu held

// LL Item reset is invoked by cache cleaner for synchronous recovery
// from ENOSPC errors. The reset operation removes the cache file and
// closes/reopens the downloaders. Although most parts of reset and
// other item operations are done with the item mutex held, the mutex
// is released during fd.WriteAt and downloaders calls. We use preAccess
// and postAccess calls to serialize reset and other item operations.

// Item is stored in the item map
//
// The Info field is written to the backing store to store status
@@ -246,23 +239,8 @@ func (item *Item) _truncate(size int64) (err error) {
	// Use open handle if available
	fd := item.fd
	if fd == nil {
		// If the metadata says we have some blocks cached then the
		// file should exist, so open without O_CREATE
		oFlags := os.O_WRONLY
		if item.info.Rs.Size() == 0 {
			oFlags |= os.O_CREATE
		}
		osPath := item.c.toOSPath(item.name) // No locking in Cache
		fd, err = file.OpenFile(osPath, oFlags, 0600)
		if err != nil && os.IsNotExist(err) {
			// If the metadata has info but the file doesn't
			// exist then it has been externally removed
			fs.Errorf(item.name, "vfs cache: detected external removal of cache file")
			item.info.Rs = nil      // show we have no blocks cached
			item.info.Dirty = false // file can't be dirty if it doesn't exist
			item._removeMeta("cache file externally deleted")
			fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
		}
		fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
		}
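The open logic above can be reduced to: only pass O_CREATE when the metadata records no cached blocks, and treat a missing file with cached metadata as external removal. A hedged standalone sketch (invented helper, not rclone's actual function):

```go
package main

import (
	"fmt"
	"os"
)

// openCacheFile opens a cache file for writing. When cachedBytes > 0 the
// metadata claims blocks exist, so we open without O_CREATE; if the file
// turns out to be missing, it was externally removed and we recreate it.
func openCacheFile(osPath string, cachedBytes int64) (*os.File, error) {
	oFlags := os.O_WRONLY
	if cachedBytes == 0 {
		oFlags |= os.O_CREATE
	}
	fd, err := os.OpenFile(osPath, oFlags, 0600)
	if err != nil && os.IsNotExist(err) {
		// Metadata said blocks were cached but the file is gone: the real
		// code also drops the cached-range metadata at this point.
		fmt.Println("vfs cache: detected external removal of cache file")
		fd, err = os.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
	}
	return fd, err
}

func main() {
	fd, err := openCacheFile(os.TempDir()+"/cache-demo", 4096)
	if err != nil {
		panic(err)
	}
	defer fd.Close()
	fmt.Println("opened", fd.Name())
}
```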
@@ -317,8 +295,6 @@ func (item *Item) _truncateToCurrentSize() (err error) {
// extended and the extended data will be filled with zeros. The
// object will be marked as dirty in this case also.
func (item *Item) Truncate(size int64) (err error) {
	item.preAccess()
	defer item.postAccess()
	item.mu.Lock()
	defer item.mu.Unlock()

@@ -438,8 +414,6 @@ func (item *Item) _dirty() {

// Dirty marks the item as changed and needing writeback
func (item *Item) Dirty() {
	item.preAccess()
	defer item.postAccess()
	item.mu.Lock()
	item._dirty()
	item.mu.Unlock()
@@ -623,8 +597,6 @@ func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {
// Close the cache file
func (item *Item) Close(storeFn StoreFn) (err error) {
	// defer log.Trace(item.o, "Item.Close")("err=%v", &err)
	item.preAccess()
	defer item.postAccess()
	var (
		downloaders   *downloaders.Downloaders
		syncWriteBack = item.c.opt.WriteBack <= 0
@@ -1182,7 +1154,6 @@ func (item *Item) setModTime(modTime time.Time) {
// ReadAt bytes from the file at off
func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
	n = 0
	var expBackOff int
	for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
		item.preAccess()
		n, err = item.readAt(b, off)
@@ -1196,12 +1167,6 @@ func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
			break
		}
		item.c.KickCleaner()
		expBackOff = 2 << uint(retries)
		time.Sleep(time.Duration(expBackOff) * time.Millisecond) // Exponential back-off the retries
	}

	if fserrors.IsErrNoSpace(err) {
		fs.Errorf(item.name, "vfs cache: failed to _ensure cache after retries %v", err)
	}

	return n, err
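The retry loop above backs off exponentially, starting at 2ms and doubling each attempt (`2 << retries`), kicking the cache cleaner between attempts. A runnable toy version of the same loop (the error and helper are stand-ins, not rclone's):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNoSpace = errors.New("no space left on device")

func main() {
	const lowLevelRetries = 5
	// attempt pretends the cache is full for the first few tries.
	attempt := func(i int) error {
		if i < 3 {
			return errNoSpace
		}
		return nil
	}
	var err error
	for retries := 0; retries < lowLevelRetries; retries++ {
		err = attempt(retries)
		if err == nil || !errors.Is(err, errNoSpace) {
			break
		}
		// Kick the cleaner here, then back off: 2ms, 4ms, 8ms, ...
		backoff := time.Duration(2<<uint(retries)) * time.Millisecond
		fmt.Println("retrying after", backoff)
		time.Sleep(backoff)
	}
	fmt.Println("final err:", err)
}
```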

@@ -1233,8 +1198,6 @@ func (item *Item) readAt(b []byte, off int64) (n int, err error) {

// WriteAt bytes to the file at off
func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
	item.preAccess()
	defer item.postAccess()
	item.mu.Lock()
	if item.fd == nil {
		item.mu.Unlock()
@@ -1325,8 +1288,6 @@ func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, e
// this means flushing the file system's in-memory copy of recently written
// data to disk.
func (item *Item) Sync() (err error) {
	item.preAccess()
	defer item.postAccess()
	item.mu.Lock()
	defer item.mu.Unlock()
	if item.fd == nil {
@@ -1346,8 +1307,6 @@ func (item *Item) Sync() (err error) {

// rename the item
func (item *Item) rename(name string, newName string, newObj fs.Object) (err error) {
	item.preAccess()
	defer item.postAccess()
	item.mu.Lock()

	// stop downloader
@@ -1377,5 +1336,6 @@ func (item *Item) rename(name string, newName string, newObj fs.Object) (err err
		_ = downloaders.Close(nil)
	}
	item.c.writeback.Rename(id, newName)

	return err
}