mirror of https://github.com/rclone/rclone.git synced 2026-01-25 13:53:29 +00:00

Compare commits


4 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
ac09c9721c Fix docs formatting 2020-09-03 16:18:56 +05:30
Nick Craig-Wood
27b9ae4fc3 vfs: fix spurious error "vfs cache: failed to _ensure cache EOF"
Before this change the error message was produced for every file which
was confusing users.

After this change we check for EOF and return from ReadAt at that
point.

See: https://forum.rclone.org/t/rclone-1-53-release/18880/10
2020-09-03 10:25:00 +01:00
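For context, a minimal sketch of the fix this commit message describes, assuming a simplified stand-in for the VFS cache item (the type, its fields and the `ensure` stub are illustrative, not rclone's actual API):

```
package vfscache

import (
	"io"
	"os"
)

// cacheItem is an illustrative stand-in for rclone's VFS cache item.
type cacheItem struct {
	size int64    // known length of the remote object
	file *os.File // local cache file backing the item
}

// ensure would download the requested range into the cache file; it is
// stubbed out here because only the EOF handling is of interest.
func (item *cacheItem) ensure(off, n int64) error { return nil }

// ReadAt sketches the fix: once off reaches the known size we return
// io.EOF immediately instead of asking ensure for more data, which is
// what produced the spurious "failed to _ensure cache EOF" error.
func (item *cacheItem) ReadAt(buf []byte, off int64) (int, error) {
	if off >= item.size {
		return 0, io.EOF
	}
	if err := item.ensure(off, int64(len(buf))); err != nil {
		return 0, err
	}
	return item.file.ReadAt(buf, off)
}
```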
Nick Craig-Wood
7e2488af10 build: include vendor tar ball in release and fix startdev 2020-09-02 17:53:05 +01:00
Nick Craig-Wood
41ecb586c4 Start v1.54.0-DEV development 2020-09-02 17:52:58 +01:00
75 changed files with 25237 additions and 34583 deletions

View File

@@ -107,10 +107,10 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
@@ -124,8 +124,6 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
@@ -135,10 +133,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -225,8 +223,8 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
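The hunks above swap between GitHub's deprecated `::set-env` and `::add-path` workflow commands, which GitHub disabled in late 2020 following a security advisory, and the environment-file mechanism. A side-by-side of the two styles, with illustrative values:

```
# Old style, rejected by current GitHub runners:
echo '::set-env name=GOTAGS::cmount'
echo '::add-path::/home/runner/bin'

# New style: append to the files named by $GITHUB_ENV and $GITHUB_PATH.
echo 'GOTAGS=cmount' >> "$GITHUB_ENV"
echo '/home/runner/bin' >> "$GITHUB_PATH"
```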

MANUAL.html generated (17460 changed lines)

File diff suppressed because one or more lines are too long

MANUAL.md generated (569 changed lines)
View File

@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Oct 26, 2020
% Sep 02, 2020
# Rclone syncs your files to cloud storage
@@ -146,7 +146,6 @@ WebDAV or S3, that work out of the box.)
- StackPath
- SugarSync
- Tardigrade
- Tencent Cloud Object Storage (COS)
- Wasabi
- WebDAV
- Yandex Disk
@@ -504,7 +503,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone copy
Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied
## Synopsis
@@ -834,7 +833,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are
@@ -860,7 +859,6 @@ rclone check source:path dest:path [flags]
```
--combined string Make a combined report of changes to this file
--differ string Report all non-matching files to this file
--download Check by downloading rather than with hash.
--error string Report all files with errors (hashing or reading) to this file
-h, --help help for check
--match string Report all matching files to this file
@@ -1193,7 +1191,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone cleanup
Clean up the remote if possible.
Clean up the remote if possible
## Synopsis
@@ -1917,7 +1915,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone copyto
Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied
## Synopsis
@@ -2042,7 +2040,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are
@@ -2436,7 +2434,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone lsf
List directories and objects in remote:path formatted for parsing.
List directories and objects in remote:path formatted for parsing
## Synopsis
@@ -2746,9 +2744,6 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount
**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
## Installing on Windows
To run rclone mount on Windows, you will need to
@@ -2891,6 +2886,9 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
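A runnable sketch of the chunk-doubling rule described above; the function and parameter names are made up, but the output reproduces the ranges quoted in the text:

```
package main

import "fmt"

// chunkRanges doubles the read size after every chunk, capping it at
// limit (limit < 0 means no cap, matching --vfs-read-chunk-size-limit off).
func chunkRanges(fileSize, initial, limit int64) [][2]int64 {
	var ranges [][2]int64
	size := initial
	for off := int64(0); off < fileSize; {
		end := off + size
		if end > fileSize {
			end = fileSize
		}
		ranges = append(ranges, [2]int64{off, end})
		off = end
		size *= 2
		if limit >= 0 && size > limit {
			size = limit
		}
	}
	return ranges
}

func main() {
	const M = int64(1) << 20
	// 100M initial chunk with a 500M limit, as in the example above:
	for _, r := range chunkRanges(2000*M, 100*M, 500*M) {
		fmt.Printf("%dM-%dM ", r[0]/M, r[1]/M)
	}
	fmt.Println() // 0M-100M 100M-300M 300M-700M 700M-1200M 1200M-1700M 1700M-2000M
}
```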
## VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects
@@ -3054,11 +3052,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -3296,7 +3289,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone obscure
Obscure password for use in the rclone config file.
Obscure password for use in the rclone config file
## Synopsis
@@ -3761,11 +3754,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -4068,11 +4056,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -4531,11 +4514,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -5057,11 +5035,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -5529,11 +5502,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
@@ -6534,8 +6502,6 @@ This can be useful for tracking down problems with syncs in
combination with the `-v` flag. See the [Logging section](#logging)
for more info.
If FILE exists then rclone will append to it.
Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
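A minimal `logrotate` stanza of the kind suggested above, assuming rclone is run with `--log-file /var/log/rclone.log` (the path and rotation policy are illustrative):

```
/var/log/rclone.log {
    daily
    rotate 7
    compress
    missingok
    notifempty
    copytruncate
}
```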
@@ -7030,17 +6996,11 @@ or with `--backup-dir`. See `--backup-dir` for more info.
For example
rclone copy -i /path/to/local/file remote:current --suffix .bak
rclone sync -i /path/to/local/file remote:current --suffix .bak
will copy `/path/to/local` to `remote:current`, but for any files
will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted have .bak added.
If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.
rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
### --suffix-keep-extension ###
When using `--suffix`, setting this causes rclone put the SUFFIX
@@ -8933,8 +8893,6 @@ OR
"result": "<Raw command line output>"
}
```
**Authentication is required for this call.**
### core/gc: Runs a garbage collection. {#core-gc}
@@ -9610,7 +9568,7 @@ This allows you to remove a plugin using it's name
This takes parameters
- name: name of the plugin in the format `author`/`plugin_name`
- name: name of the plugin in the format <author>/<plugin_name>
Eg
@@ -9624,7 +9582,7 @@ This allows you to remove a plugin using it's name
This takes the following parameters
- name: name of the plugin in the format `author`/`plugin_name`
- name: name of the plugin in the format <author>/<plugin_name>
Eg
@@ -10569,7 +10527,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.2")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
-v, --verbose count Print lots more stuff (repeat for more)
```
@@ -10668,7 +10626,7 @@ and may be set in the config file.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-auth-url string Auth server URL.
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-id string OAuth Client Id
--drive-client-secret string OAuth Client Secret
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding MultiEncoder This sets the encoding for the backend. (default InvalidUtf8)
@@ -11517,7 +11475,6 @@ The S3 backend can be used with a number of different providers:
- Minio
- Scaleway
- StackPath
- Tencent Cloud Object Storage (COS)
- Wasabi
@@ -11638,7 +11595,7 @@ Choose a number from below, or type in your own value
/ Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
\ "ap-south-1"
/ Asia Pacific (Hong Kong) Region
/ Asia Patific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
\ "ap-east-1"
/ South America (Sao Paulo) Region
@@ -11955,7 +11912,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
### Standard Options
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
#### --s3-provider
@@ -11986,8 +11943,6 @@ Choose your S3 provider.
- Scaleway Object Storage
- "StackPath"
- StackPath Object Storage
- "TencentCOS"
- Tencent Cloud Object Storage (COS)
- "Wasabi"
- Wasabi Object Storage
- "Other"
@@ -12044,12 +11999,12 @@ Region to connect to.
- "us-east-2"
- US East (Ohio) Region
- Needs location constraint us-east-2.
- "us-west-1"
- US West (Northern California) Region
- Needs location constraint us-west-1.
- "us-west-2"
- US West (Oregon) Region
- Needs location constraint us-west-2.
- "us-west-1"
- US West (Northern California) Region
- Needs location constraint us-west-1.
- "ca-central-1"
- Canada (Central) Region
- Needs location constraint ca-central-1.
@@ -12059,15 +12014,9 @@ Region to connect to.
- "eu-west-2"
- EU (London) Region
- Needs location constraint eu-west-2.
- "eu-west-3"
- EU (Paris) Region
- Needs location constraint eu-west-3.
- "eu-north-1"
- EU (Stockholm) Region
- Needs location constraint eu-north-1.
- "eu-south-1"
- EU (Milan) Region
- Needs location constraint eu-south-1.
- "eu-central-1"
- EU (Frankfurt) Region
- Needs location constraint eu-central-1.
@@ -12083,36 +12032,15 @@ Region to connect to.
- "ap-northeast-2"
- Asia Pacific (Seoul)
- Needs location constraint ap-northeast-2.
- "ap-northeast-3"
- Asia Pacific (Osaka-Local)
- Needs location constraint ap-northeast-3.
- "ap-south-1"
- Asia Pacific (Mumbai)
- Needs location constraint ap-south-1.
- "ap-east-1"
- Asia Pacific (Hong Kong) Region
- Asia Patific (Hong Kong) Region
- Needs location constraint ap-east-1.
- "sa-east-1"
- South America (Sao Paulo) Region
- Needs location constraint sa-east-1.
- "me-south-1"
- Middle East (Bahrain) Region
- Needs location constraint me-south-1.
- "af-south-1"
- Africa (Cape Town) Region
- Needs location constraint af-south-1.
- "cn-north-1"
- China (Beijing) Region
- Needs location constraint cn-north-1.
- "cn-northwest-1"
- China (Ningxia) Region
- Needs location constraint cn-northwest-1.
- "us-gov-east-1"
- AWS GovCloud (US-East) Region
- Needs location constraint us-gov-east-1.
- "us-gov-west-1"
- AWS GovCloud (US) Region
- Needs location constraint us-gov-west-1.
#### --s3-region
@@ -12368,54 +12296,6 @@ Endpoint for StackPath Object Storage.
#### --s3-endpoint
Endpoint for Tencent COS API.
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
- Type: string
- Default: ""
- Examples:
- "cos.ap-beijing.myqcloud.com"
- Beijing Region.
- "cos.ap-nanjing.myqcloud.com"
- Nanjing Region.
- "cos.ap-shanghai.myqcloud.com"
- Shanghai Region.
- "cos.ap-guangzhou.myqcloud.com"
- Guangzhou Region.
- "cos.ap-nanjing.myqcloud.com"
- Nanjing Region.
- "cos.ap-chengdu.myqcloud.com"
- Chengdu Region.
- "cos.ap-chongqing.myqcloud.com"
- Chongqing Region.
- "cos.ap-hongkong.myqcloud.com"
- Hong Kong (China) Region.
- "cos.ap-singapore.myqcloud.com"
- Singapore Region.
- "cos.ap-mumbai.myqcloud.com"
- Mumbai Region.
- "cos.ap-seoul.myqcloud.com"
- Seoul Region.
- "cos.ap-bangkok.myqcloud.com"
- Bangkok Region.
- "cos.ap-tokyo.myqcloud.com"
- Tokyo Region.
- "cos.na-siliconvalley.myqcloud.com"
- Silicon Valley Region.
- "cos.na-ashburn.myqcloud.com"
- Virginia Region.
- "cos.na-toronto.myqcloud.com"
- Toronto Region.
- "cos.eu-frankfurt.myqcloud.com"
- Frankfurt Region.
- "cos.eu-moscow.myqcloud.com"
- Moscow Region.
- "cos.accelerate.myqcloud.com"
- Use Tencent COS Accelerate Endpoint.
#### --s3-endpoint
Endpoint for S3 API.
Required when using an S3 clone.
@@ -12453,22 +12333,18 @@ Used when creating buckets only.
- Empty for US Region, Northern Virginia or Pacific Northwest.
- "us-east-2"
- US East (Ohio) Region.
- "us-west-1"
- US West (Northern California) Region.
- "us-west-2"
- US West (Oregon) Region.
- "us-west-1"
- US West (Northern California) Region.
- "ca-central-1"
- Canada (Central) Region.
- "eu-west-1"
- EU (Ireland) Region.
- "eu-west-2"
- EU (London) Region.
- "eu-west-3"
- EU (Paris) Region.
- "eu-north-1"
- EU (Stockholm) Region.
- "eu-south-1"
- EU (Milan) Region.
- "EU"
- EU Region.
- "ap-southeast-1"
@@ -12478,27 +12354,13 @@ Used when creating buckets only.
- "ap-northeast-1"
- Asia Pacific (Tokyo) Region.
- "ap-northeast-2"
- Asia Pacific (Seoul) Region.
- "ap-northeast-3"
- Asia Pacific (Osaka-Local) Region.
- Asia Pacific (Seoul)
- "ap-south-1"
- Asia Pacific (Mumbai) Region.
- Asia Pacific (Mumbai)
- "ap-east-1"
- Asia Pacific (Hong Kong) Region.
- Asia Pacific (Hong Kong)
- "sa-east-1"
- South America (Sao Paulo) Region.
- "me-south-1"
- Middle East (Bahrain) Region.
- "af-south-1"
- Africa (Cape Town) Region.
- "cn-north-1"
- China (Beijing) Region
- "cn-northwest-1"
- China (Ningxia) Region.
- "us-gov-east-1"
- AWS GovCloud (US-East) Region.
- "us-gov-west-1"
- AWS GovCloud (US) Region.
#### --s3-location-constraint
@@ -12601,8 +12463,6 @@ doesn't copy the ACL from the source but rather writes a fresh one.
- Type: string
- Default: ""
- Examples:
- "default"
- Owner gets Full_CONTROL. No one else has access rights (default).
- "private"
- Owner gets FULL_CONTROL. No one else has access rights (default).
- "public-read"
@@ -12703,24 +12563,6 @@ The storage class to use when storing new objects in OSS.
#### --s3-storage-class
The storage class to use when storing new objects in Tencent COS.
- Config: storage_class
- Env Var: RCLONE_S3_STORAGE_CLASS
- Type: string
- Default: ""
- Examples:
- ""
- Default
- "STANDARD"
- Standard storage class
- "ARCHIVE"
- Archive storage mode.
- "STANDARD_IA"
- Infrequent access storage mode.
#### --s3-storage-class
The storage class to use when storing new objects in S3.
- Config: storage_class
@@ -12737,7 +12579,7 @@ The storage class to use when storing new objects in S3.
### Advanced Options
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
#### --s3-bucket-acl
@@ -12958,7 +12800,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.
@@ -13827,138 +13669,6 @@ d) Delete this remote
y/e/d> y
```
### Tencent COS {#tencent-cos}
[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.
To configure access to Tencent COS, follow the steps below:
1. Run `rclone config` and select `n` for a new remote.
```
rclone config
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```
2. Give the name of the configuration. For example, name it 'cos'.
```
name> cos
```
3. Select `s3` storage.
```
Choose a number from below, or type in your own value
1 / 1Fichier
\ "fichier"
2 / Alias for an existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
\ "s3"
[snip]
Storage> s3
```
4. Select `TencentCOS` provider.
```
Choose a number from below, or type in your own value
1 / Amazon Web Services (AWS) S3
\ "AWS"
[snip]
11 / Tencent Cloud Object Storage (COS)
\ "TencentCOS"
[snip]
provider> TencentCOS
```
5. Enter your SecretId and SecretKey of Tencent Cloud.
```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
\ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
\ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```
6. Select an endpoint for Tencent COS. These are the standard endpoints for the different regions.
```
1 / Beijing Region.
\ "cos.ap-beijing.myqcloud.com"
2 / Nanjing Region.
\ "cos.ap-nanjing.myqcloud.com"
3 / Shanghai Region.
\ "cos.ap-shanghai.myqcloud.com"
4 / Guangzhou Region.
\ "cos.ap-guangzhou.myqcloud.com"
[snip]
endpoint> 4
```
7. Choose acl and storage class.
```
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Owner gets Full_CONTROL. No one else has access rights (default).
\ "default"
[snip]
acl> 1
The storage class to use when storing new objects in Tencent COS.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Default
\ ""
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[cos]
type = s3
provider = TencentCOS
env_auth = false
access_key_id = xxx
secret_access_key = xxx
endpoint = cos.ap-guangzhou.myqcloud.com
acl = default
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name Type
==== ====
cos s3
```
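Once configured, the remote behaves like any other rclone remote. An illustrative session (the bucket name is made up; Tencent COS bucket names carry an appid suffix):

```
rclone mkdir cos:examplebucket-1250000000
rclone copy /path/to/local/files cos:examplebucket-1250000000/backup
rclone ls cos:examplebucket-1250000000
```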
### Netease NOS ###
For Netease NOS configure as per the configurator `rclone config`
@@ -14877,8 +14587,7 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
Box file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
Reverse Solidus).
and from an identical looking unicode equivalent `＼`.
Box only supports filenames up to 255 characters in length.
@@ -16160,26 +15869,23 @@ See: the [encoding section in the overview](https://rclone.org/overview/#encodin
Crypt
----------------------------------------
Rclone `crypt` remotes encrypt and decrypt other remotes.
The `crypt` remote encrypts and decrypts another remote.
To use `crypt`, first set up the underlying remote. Follow the `rclone
config` instructions for that remote.
To use it first set up the underlying remote following the config
instructions for that remote. You can also use a local pathname
instead of a remote which will encrypt and decrypt from that directory
which might be useful for encrypting onto a USB stick for example.
`crypt` applied to a local pathname instead of a remote will
encrypt and decrypt that directory, and can be used to encrypt USB
removable drives.
First check your chosen remote is working - we'll call it
`remote:path` in these docs. Note that anything inside `remote:path`
will be encrypted and anything outside won't. This means that if you
are using a bucket based remote (eg S3, B2, swift) then you should
probably put the bucket in the remote `s3:bucket`. If you just use
`s3:` then rclone will make encrypted bucket names too (if using file
name encryption) which may or may not be what you want.
Before configuring the crypt remote, check the underlying remote is
working. In this example the underlying remote is called `remote:path`.
Anything inside `remote:path` will be encrypted and anything outside
will not. In the case of an S3 based underlying remote (eg Amazon S3,
B2, Swift) it is generally advisable to define a crypt remote in the
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
file name encryption, rclone will encrypt the bucket name.
Configure `crypt` using `rclone config`. In this example the `crypt`
remote is called `secret`, to differentiate it from the underlying
`remote`.
Now configure `crypt` using `rclone config`. We will call this one
`secret` to differentiate it from the `remote`.
```
No remotes found - make a new one
@@ -16253,42 +15959,49 @@ d) Delete this remote
y/e/d> y
```
**Important** The crypt password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless encryption of `rclone.conf` is specified.
**Important** The password is stored in the config file is lightly
obscured so it isn't immediately obvious what it is. It is in no way
secure unless you use config file encryption.
A long passphrase is recommended, or `rclone config` can generate a
random one.
A long passphrase is recommended, or you can use a random one.
The obscured password is created using AES-CTR with a static key. The
salt is stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.
The obscured password is created by using AES-CTR with a static key, with
the salt stored verbatim at the beginning of the obscured password. This
static key is shared by between all versions of rclone.
If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.
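A runnable sketch of the obscuring scheme just described: AES-CTR with a static key, and the random IV (the "salt") stored verbatim at the front of the output. The key below is made up for illustration; rclone ships its own well-known key, which is exactly why an obscured password is not secure:

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Illustrative static key only - rclone uses its own fixed 32-byte key,
// shared by every copy of rclone, so this scheme hides nothing from an
// attacker with the source code.
var staticKey = []byte("0123456789abcdef0123456789abcdef")

func obscure(password string) (string, error) {
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize]
	if _, err := rand.Read(iv); err != nil { // salt stored at the front
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func reveal(obscured string) (string, error) {
	buf, err := base64.RawURLEncoding.DecodeString(obscured)
	if err != nil {
		return "", err
	}
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	out := make([]byte, len(buf)-aes.BlockSize)
	cipher.NewCTR(block, buf[:aes.BlockSize]).XORKeyStream(out, buf[aes.BlockSize:])
	return string(out), nil
}

func main() {
	o, _ := obscure("hunter2")
	p, _ := reveal(o)
	fmt.Println(o, p) // o differs every run (random salt), p is recovered
}
```

Re-running produces a different obscured string each time, which is the "different salt" behaviour described above.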
Rclone does not encrypt
Note that rclone does not encrypt
* file length - this can be calculated within 16 bytes
* modification time - used for syncing
## Specifying the remote ##
In normal use, ensure the remote has a `:` in. If specified without,
rclone uses a local directory of that name. For example if a remote
`/path/to/secret/files` is specified, rclone encrypts content to that
directory. If a remote `name` is specified, rclone targets a directory
`name` in the current directory.
In normal use, make sure the remote has a `:` in. If you specify the
remote without a `:` then rclone will use a local directory of that
name. So if you use a remote of `/path/to/secret/files` then rclone
will encrypt stuff to that directory. If you use a remote of `name`
then rclone will put files in a directory called `name` in the current
directory.
If remote `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subpath` element is encrypted.
If you specify the remote as `remote:path/to/dir` then rclone will
store encrypted files in `path/to/dir` on the remote. If you are using
file name encryption, then when you save files to
`secret:subdir/subfile` this will store them in the unencrypted path
`path/to/dir` but the `subdir/subpath` bit will be encrypted.
Note that unless you want encrypted bucket names (which are difficult
to manage because you won't know what directory they represent in web
interfaces etc), you should probably specify a bucket, eg
`remote:secretbucket` when using bucket based remotes such as S3,
Swift, Hubic, B2, GCS.
## Example ##
Create the following file structure using "standard" file name
To test I made a little directory of files using "standard" file name
encryption.
```
@@ -16302,7 +16015,7 @@ plaintext/
└── file4.txt
```
Copy these to the remote, and list them
Copy these to the remote and list them back
```
$ rclone -q copy plaintext secret:
@@ -16314,7 +16027,7 @@ $ rclone -q ls secret:
9 subdir/file3.txt
```
The crypt remote looks like
Now see what that looked like when encrypted
```
$ rclone -q ls remote:path
@@ -16325,7 +16038,7 @@ $ rclone -q ls remote:path
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
```
The directory structure is preserved
Note that this retains the directory structure which means you can do this
```
$ rclone -q ls secret:subdir
@@ -16334,9 +16047,9 @@ $ rclone -q ls secret:subdir
10 subsubdir/file4.txt
```
Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.
If you don't use file name encryption then the remote will look like this
- note the `.bin` extensions added to prevent the cloud provider
attempting to interpret the data.
```
$ rclone -q ls remote:path
@@ -16349,6 +16062,8 @@ $ rclone -q ls remote:path
### File name encryption modes ###
Here are some of the features of the file name encryption modes
Off
* doesn't hide file names or directory structure
@@ -16367,19 +16082,17 @@ Standard
Obfuscation
This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".
distance based on the filename. We store the distance at the beginning
of the filename. So a file called "hello" may become "53.jgnnq".
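A toy version of the rotation idea, with the distance derived from the name and stored as a numeric prefix. This only sketches the concept; rclone's actual obfuscation differs in detail:

```
package main

import "fmt"

// obfuscate applies a Caesar rotation whose distance is computed from
// the name itself and recorded as a prefix, so it can be undone later.
func obfuscate(name string) string {
	dist := 0
	for _, r := range name {
		dist += int(r)
	}
	dist %= 26
	out := []rune(name)
	for i, r := range out {
		switch {
		case r >= 'a' && r <= 'z':
			out[i] = 'a' + (r-'a'+rune(dist))%26
		case r >= 'A' && r <= 'Z':
			out[i] = 'A' + (r-'A'+rune(dist))%26
		}
	}
	return fmt.Sprintf("%d.%s", dist, string(out))
}

func main() {
	fmt.Println(obfuscate("hello")) // 12.tqxxa
}
```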
Obfuscation is not a strong encryption of filenames, but hinders
automated scanning tools picking up on filename patterns. It is an
intermediate between "off" and "standard" which allows for longer path
segment names.
This is not a strong encryption of filenames, but it may stop automated
scanning tools from picking up on filename patterns. As such it's an
intermediate between "off" and "standard". The advantage is that it
allows for longer path segment names.
There is a possibility with some unicode based filenames that the
obfuscation is weak and may map lower case characters to upper case
equivalents.
Obfuscation cannot be relied upon for strong protection.
equivalents. You can not rely on this for strong protection.
* file names very lightly obfuscated
* file names can be longer than standard encryption
@@ -16387,14 +16100,13 @@ Obfuscation cannot be relied upon for strong protection.
* directory structure visible
* identical files names will have identical uploaded names
Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less thn 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.
Cloud storage systems have various limits on file name length and
total path length which you are more likely to hit using "Standard"
file name encryption. If you keep your file names to below 156
characters in length then you should be OK on all providers.
An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.
There may be an even more secure file name encryption mode in the
future which will address the long file name problem.
### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -16420,10 +16132,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.
Hashes are not stored for crypt. However the data integrity is
Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.
Use the `rclone cryptcheck` command to check the
Note that you should use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.
@@ -18285,10 +17997,8 @@ Here are the standard options specific to drive (Google Drive).
#### --drive-client-id
Google Application Client Id
Setting your own is recommended.
See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
If you leave this blank, it will use an internal key which is low performance.
OAuth Client Id
Leave blank normally.
- Config: client_id
- Env Var: RCLONE_DRIVE_CLIENT_ID
@@ -19978,13 +19688,8 @@ flag.
Note that Jottacloud requires the MD5 hash before upload so if the
source does not have an MD5 checksum then the file will be cached
temporarily on disk (wherever the `TMPDIR` environment variable points
to) before it is uploaded. Small files will be cached in memory - see
to) before it is uploaded. Small files will be cached in memory - see
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
true for crypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).
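A sketch of the hash-before-upload behaviour described above: the source is read once to compute the MD5, buffering in memory up to a limit and spilling to a temporary file beyond it. The function and parameter names are illustrative, not rclone's API:

```
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// hashAndBuffer returns the MD5 of in plus a reader that replays the
// data for the actual upload. Sources up to memLimit bytes stay in
// memory; larger ones spill to a temp file (rclone uses TMPDIR).
func hashAndBuffer(in io.Reader, memLimit int64) (string, io.Reader, func(), error) {
	hasher := md5.New()
	var mem bytes.Buffer
	noop := func() {}

	_, err := io.CopyN(io.MultiWriter(hasher, &mem), in, memLimit+1)
	if err == io.EOF { // fits within memLimit: serve from memory
		return hex.EncodeToString(hasher.Sum(nil)), &mem, noop, nil
	}
	if err != nil {
		return "", nil, noop, err
	}

	// Too big: write the buffered prefix and the remainder to disk.
	tmp, err := os.CreateTemp("", "upload-")
	if err != nil {
		return "", nil, noop, err
	}
	cleanup := func() { tmp.Close(); os.Remove(tmp.Name()) }
	if _, err = tmp.Write(mem.Bytes()); err == nil {
		_, err = io.Copy(io.MultiWriter(hasher, tmp), in)
	}
	if err == nil {
		_, err = tmp.Seek(0, io.SeekStart)
	}
	if err != nil {
		cleanup()
		return "", nil, noop, err
	}
	return hex.EncodeToString(hasher.Sum(nil)), tmp, cleanup, nil
}

func main() {
	sum, replay, cleanup, err := hashAndBuffer(strings.NewReader("hello jottacloud"), 4)
	if err != nil {
		panic(err)
	}
	defer cleanup()
	body, _ := io.ReadAll(replay)
	fmt.Println(sum, len(body)) // hash is known before the body is re-read
}
```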
#### Restricted filename characters
@@ -25727,86 +25432,6 @@ Options:
# Changelog
## v1.53.2 - 2020-10-26
[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
* Bug Fixes
* accounting
* Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
* Stabilize display order of transfers on Windows (Nick Craig-Wood)
* operations
* Fix use of --suffix without --backup-dir (Nick Craig-Wood)
* Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
* build
* Work around GitHub actions brew problem (Nick Craig-Wood)
* Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
* mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
* Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
* Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
* Fix a race condition in retryFailedResets (Leo Luan)
* Fix missed concurrency control between some item operations and reset (Leo Luan)
* Add exponential backoff during ENOSPC retries (Leo Luan)
* Add a missed update of used cache space (Leo Luan)
* Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
* Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
* Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
* Fix upload over crypt (Ivan Andreev)
* Fichier
* Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
* Remove clientSecret from config when upgrading to token based authentication (buengese)
* Avoid double url escaping of device/mountpoint (albertony)
* Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
* Fix uploads after recent changes on server (Ivan Andreev)
* Fix range requests after june changes on server (Ivan Andreev)
* Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
* Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
* Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
* Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
* Always convert the checksum to lower case (buengese)
* Union
* Create root directories if none exist (Nick Craig-Wood)
## v1.53.1 - 2020-09-13
[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
* Bug Fixes
* accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
* check
* Add back missing --download flag (Nick Craig-Wood)
* Fix docs (Nick Craig-Wood)
* docs
* Note --log-file does append (Nick Craig-Wood)
* Add full stops for consistency in rclone --help (edwardxml)
* Add Tencent COS to s3 provider list (wjielai)
* Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
* jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
* Fix formatting of rc docs page (Nick Craig-Wood)
* build
* Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
* Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
* Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
* Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
* Re-adds special oauth help text (Tim Gallant)
* Opendrive
* Do not retry 400 errors (Evan Harris)
## v1.53.0 - 2020-09-02
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)

MANUAL.txt generated (16770 changed lines)

File diff suppressed because it is too large

View File

@@ -8,8 +8,7 @@ VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
@@ -247,13 +246,5 @@ startdev:
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
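The awk expressions in the hunk above split the version string on dots and bump one field (the `$$` in the Makefile just escapes `$` for make). A quick shell check with an illustrative version:

```
$ echo v1.53.2 | awk -F. -v OFS=. '{print $1,$2+1,0}'    # NEXT_VERSION
v1.54.0
$ echo v1.53.2 | awk -F. -v OFS=. '{print $1,$2,$3+1}'   # NEXT_PATCH_VERSION
v1.53.3
```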

View File

@@ -64,7 +64,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

View File

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git checkout master
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
@@ -31,7 +31,7 @@ This file describes how to make the various kinds of releases
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* make startdev
* # announce with forum post, twitter post, patreon post
Early in the next release cycle update the dependencies
@@ -42,35 +42,62 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
## Making a point release
If rclone needs a point release due to some horrendous bug:
Set vars
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
* FIXME this is now broken with new semver layout - needs fixing
* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-stable
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce!
## Making a manual build of docker

View File

@@ -1 +1 @@
v1.53.2
v1.54.0

View File

@@ -296,8 +296,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -960,8 +958,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
info := f.wrapInfo(src, chunkRemote, size)
// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {
@@ -1170,14 +1166,10 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// Note #1:
// We might not get here because some remotes (eg. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
c.chunkLimit = c.chunkSize
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {

View File

@@ -157,17 +157,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
return false
}
func driveOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
}
opts = append(opts, opt)
}
return opts
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -203,7 +192,7 @@ func init() {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: append(driveOAuthOptions(), []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{

View File

@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
if size > int64(100e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles

View File

@@ -373,9 +373,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -387,6 +384,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -553,7 +551,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
@@ -1089,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
retry, _ := shouldRetry(resp, err)
return (retry && resp.StatusCode != 500), err
})
if err != nil {
return nil, err
@@ -1193,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
// surprise! jottacloud fucked up dirmove - the api spits out an error but
// dir gets moved regardless
if apiErr, ok := err.(*api.Error); ok {
if apiErr.StatusCode == 500 {
_, err := f.NewObject(ctx, dstRemote)
if err == fs.ErrorNotAFile {
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
return nil
}
return err
}
}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}

View File

@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
fs.Debugf(o, "Failed to set sparse: %v", err)
}
}
@@ -1231,15 +1231,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
if runtime.GOOS == "windows" && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}
// Stat an Object into info

View File

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

View File

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime uint64 `json:"mtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,6 +159,71 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

View File

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/pkg/errors"
@@ -656,14 +655,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -677,7 +671,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
@@ -1867,30 +1861,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}
opts := rest.Opts{
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
token, err := f.accessToken()
if err != nil {
closeBody(res)
return "", err
}
f.shardURL = url
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
}
var info api.ShardInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return "", err
}
f.shardURL = info.Body.Upload[0].URL
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)
@@ -2122,18 +2116,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
start, end, partialRequest := getTransferRange(o.size, options...)
headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
start, end, partial := getTransferRange(o.size, options...)
// TODO: set custom timeouts
opts := rest.Opts{
@@ -2144,7 +2127,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: headers,
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
}
var res *http.Response
@@ -2165,36 +2151,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206
var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
var hasher gohash.Hash
if !partial {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream = &endHandler{
wrapStream := &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}
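The range-handling code above deals with servers that ignore the Range header and reply 200 with the full body: discard the first `start` bytes, then cap the reader at `end-start` so the caller still sees exactly the range it asked for. A self-contained sketch of that pattern, with `io.LimitReader` standing in for rclone's `readers.NewLimitedReadCloser`:

```
package main

import (
	"fmt"
	"io"
	"strings"
)

// clampToRange turns a full-content body into the requested [start, end)
// window by discarding the prefix and limiting what remains.
func clampToRange(body io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		if _, err := io.CopyN(io.Discard, body, start); err != nil {
			return nil, err
		}
	}
	return io.LimitReader(body, end-start), nil
}

func main() {
	full := strings.NewReader("0123456789") // server sent everything
	r, err := clampToRange(full, 3, 7)      // we asked for bytes 3-6
	if err != nil {
		panic(err)
	}
	got, _ := io.ReadAll(r)
	fmt.Println(string(got)) // 3456
}
```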

View File

@@ -1247,10 +1247,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use

View File

@@ -646,6 +646,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes

View File

@@ -58,7 +58,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -94,9 +94,6 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -122,9 +119,6 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
@@ -134,12 +128,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -149,15 +143,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -173,36 +161,15 @@ func init() {
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
Help: "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
}},
}, {
Name: "region",
@@ -218,7 +185,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Provider: "!AWS,Alibaba,Scaleway",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -509,73 +476,10 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
Help: "Endpoint for Tencent COS API.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.",
}, {
Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.",
}, {
Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.",
}, {
Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.",
}, {
Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.",
}, {
Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.",
}, {
Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.",
}, {
Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.",
}, {
Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.",
}, {
Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.",
}, {
Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.",
}, {
Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.",
}, {
Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.",
}, {
Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.",
}, {
Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -615,12 +519,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
@@ -630,15 +534,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -653,37 +551,16 @@ func init() {
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul) Region.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local) Region.",
Help: "Asia Pacific (Seoul)",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai) Region.",
Help: "Asia Pacific (Mumbai)",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region.",
Help: "Asia Pacific (Hong Kong)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region.",
}},
}, {
Name: "location_constraint",
@@ -789,7 +666,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -801,13 +678,9 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS,TencentCOS",
Provider: "!IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -969,24 +842,6 @@ isn't set then "acl" is used instead.`,
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name: "storage_class",
Help: "The storage class to use when storing new objects in Tencent COS.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",
@@ -1120,7 +975,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
@@ -1450,7 +1305,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
@@ -1732,7 +1587,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{

View File

@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int64 `json:"size"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}
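The `Size` field was widened because `int` is only 32 bits on 32-bit builds, so a library over 2 GB cannot be represented. A small demonstration of the width problem; the shift stands in for a large JSON value (on a real 32-bit build the JSON decode itself would fail with an overflow error):

```
package main

import "fmt"

func main() {
	// A 3 GiB library size does not fit in 32 bits:
	var size32 int32 = 3
	size32 <<= 30
	fmt.Println(size32) // -1073741824 (wrapped)

	var size64 int64 = 3
	size64 <<= 30
	fmt.Println(size64) // 3221225472, hence `Size int64`
}
```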

View File

@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro
for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(library.Size)
d.SetSize(int64(library.Size))
entries = append(entries, d)
}

View File

@@ -1087,7 +1087,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}
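For reference, the fixed `parseHash` in runnable form, showing why the `ToLower` matters when a server emits uppercase hex digests:

```
package main

import (
	"fmt"
	"strings"
)

// parseHash extracts the digest from "*sum" style output. Some servers
// print uppercase hex, so normalise before comparing checksums.
func parseHash(b []byte) string {
	return strings.ToLower(strings.Split(strings.TrimLeft(string(b), "\\"), " ")[0])
}

func main() {
	fmt.Println(parseHash([]byte("D41D8CD98F00B204E9800998ECF8427E  file.txt\n")))
	// d41d8cd98f00b204e9800998ecf8427e
}
```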
// Parses the byte array output from the SSH session

View File

@@ -145,16 +145,11 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
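The new branch bootstraps the root: creating `dir` walks up through `parentDir` until it reaches `""`, at which point the upstream roots themselves are used. A self-contained sketch of that parent-first recursion; the map-backed store is an assumption used only for illustration:

```
package main

import (
	"fmt"
	"path"
)

var created = map[string]bool{}

func parentDir(dir string) string {
	return path.Dir(path.Clean(dir))
}

func mkdir(dir string) error {
	if dir == "" || dir == "." || created[dir] {
		return nil // root (or already made): nothing to do
	}
	// Make the parent first, then this directory.
	if err := mkdir(parentDir(dir)); err != nil {
		return err
	}
	created[dir] = true
	fmt.Println("created", dir)
	return nil
}

func main() {
	_ = mkdir("a/b/c") // created a, created a/b, created a/b/c
}
```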
@@ -823,7 +818,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,

View File

@@ -280,7 +280,7 @@ func stripVersion(goarch string) string {
// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
log.Printf("Compiling %s/%s", goos, goarch)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
@@ -298,6 +298,7 @@ func compileArch(version, goos, goarch, dir string) bool {
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,
"..",
@@ -324,7 +325,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
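`stripVersion` exists because .deb/.rpm architecture fields must not carry the ARM version suffix used in rclone's build matrix. A sketch of the assumed behaviour, not necessarily the exact implementation:

```
package main

import (
	"fmt"
	"strings"
)

// stripVersion drops an ARM version suffix such as "-v7" so the package
// architecture field gets plain "arm".
func stripVersion(goarch string) string {
	if i := strings.Index(goarch, "-"); i >= 0 {
		return goarch[:i]
	}
	return goarch
}

func main() {
	fmt.Println(stripVersion("arm-v7")) // arm
	fmt.Println(stripVersion("amd64"))  // amd64
}
```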

View File

@@ -29,7 +29,6 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
AddFlags(cmdFlags)
}
@@ -51,7 +50,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

View File

@@ -14,7 +14,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible.`,
Short: `Clean up the remote if possible`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

View File

@@ -22,7 +22,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied.`,
Short: `Copy files from source to dest, skipping already copied`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

View File

@@ -15,7 +15,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied.`,
Short: `Copy files from source to dest, skipping already copied`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

View File

@@ -44,7 +44,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing.`,
Short: `List directories and objects in remote:path formatted for parsing`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

View File

@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
modTime := node.ModTime()
// set attributes
vfs := node.VFS()
attr.Owner.Gid = vfs.Opt.GID
attr.Owner.Uid = vfs.Opt.UID
attr.Owner.Gid = vfs.Opt.UID
attr.Owner.Uid = vfs.Opt.GID
attr.Mode = getMode(node)
attr.Size = Size
attr.Nlink = 1

View File

@@ -192,9 +192,6 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount
**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
### Installing on Windows
To run rclone ` + commandName + ` on Windows, you will need to
@@ -336,6 +333,9 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -17,7 +17,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone config file.`,
Short: `Obscure password for use in the rclone config file`,
Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these

View File

@@ -148,7 +148,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}

View File

@@ -404,7 +404,6 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
Box file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
Reverse Solidus).
and from an identical looking unicode equivalent `＼`.
Box only supports filenames up to 255 characters in length.

View File

@@ -5,86 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.53.2 - 2020-10-26
[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
* Bug Fixes
* accounting
* Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
* Stabilize display order of transfers on Windows (Nick Craig-Wood)
* operations
* Fix use of --suffix without --backup-dir (Nick Craig-Wood)
* Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
* build
* Work around GitHub actions brew problem (Nick Craig-Wood)
* Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
* mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
* Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
* Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
* Fix a race condition in retryFailedResets (Leo Luan)
* Fix missed concurrency control between some item operations and reset (Leo Luan)
* Add exponential backoff during ENOSPC retries (Leo Luan)
* Add a missed update of used cache space (Leo Luan)
* Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
* Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
* Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
* Fix upload over crypt (Ivan Andreev)
* Fichier
* Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
* Remove clientSecret from config when upgrading to token based authentication (buengese)
* Avoid double url escaping of device/mountpoint (albertony)
* Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
* Fix uploads after recent changes on server (Ivan Andreev)
* Fix range requests after june changes on server (Ivan Andreev)
* Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
* Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
* Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
* Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
* Always convert the checksum to lower case (buengese)
* Union
* Create root directories if none exist (Nick Craig-Wood)
## v1.53.1 - 2020-09-13
[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
* Bug Fixes
* accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
* check
* Add back missing --download flag (Nick Craig-Wood)
* Fix docs (Nick Craig-Wood)
* docs
* Note --log-file does append (Nick Craig-Wood)
* Add full stops for consistency in rclone --help (edwardxml)
* Add Tencent COS to s3 provider list (wjielai)
* Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
* jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
* Fix formatting of rc docs page (Nick Craig-Wood)
* build
* Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
* Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
* Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
* Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
* Re-adds special oauth help text (Tim Gallant)
* Opendrive
* Do not retry 400 errors (Evan Harris)
## v1.53.0 - 2020-09-02
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)

View File

@@ -39,10 +39,10 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
* [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
* [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
* [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
* [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
* [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.
@@ -65,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone move](/commands/rclone_move/) - Move files from source to dest.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file
* [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
* [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
* [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.

View File

@@ -29,7 +29,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are
@@ -55,7 +55,6 @@ rclone check source:path dest:path [flags]
```
--combined string Make a combined report of changes to this file
--differ string Report all non-matching files to this file
--download Check by downloading rather than with hash.
--error string Report all files with errors (hashing or reading) to this file
-h, --help help for check
--match string Report all matching files to this file

View File

@@ -1,13 +1,13 @@
---
title: "rclone cleanup"
description: "Clean up the remote if possible."
description: "Clean up the remote if possible"
slug: rclone_cleanup
url: /commands/rclone_cleanup/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
---
# rclone cleanup
Clean up the remote if possible.
Clean up the remote if possible
## Synopsis

View File

@@ -1,13 +1,13 @@
---
title: "rclone copy"
description: "Copy files from source to dest, skipping already copied."
description: "Copy files from source to dest, skipping already copied"
slug: rclone_copy
url: /commands/rclone_copy/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
---
# rclone copy
Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied
## Synopsis

View File

@@ -1,13 +1,13 @@
---
title: "rclone copyto"
description: "Copy files from source to dest, skipping already copied."
description: "Copy files from source to dest, skipping already copied"
slug: rclone_copyto
url: /commands/rclone_copyto/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
---
# rclone copyto
Copy files from source to dest, skipping already copied.
Copy files from source to dest, skipping already copied
## Synopsis

View File

@@ -40,7 +40,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
and `--error` flags write paths, one per line, to the file name (or
stdout if it is `-`) supplied. What they write is described in the
help below. For example `--differ` will write all paths which are

View File

@@ -1,13 +1,13 @@
---
title: "rclone lsf"
description: "List directories and objects in remote:path formatted for parsing."
description: "List directories and objects in remote:path formatted for parsing"
slug: rclone_lsf
url: /commands/rclone_lsf/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
---
# rclone lsf
List directories and objects in remote:path formatted for parsing.
List directories and objects in remote:path formatted for parsing
## Synopsis

View File

@@ -49,9 +49,6 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount
**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
## Installing on Windows
To run rclone mount on Windows, you will need to
@@ -194,6 +191,9 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
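The chunk schedule above is easy to verify mechanically. A small sketch that reproduces both examples from the text; sizes are in MiB and the code is illustrative, not the VFS implementation:

```
package main

import "fmt"

// chunkPlan prints the ranges requested for a sequential read following
// the rule above: double the chunk after each request, capped at limit;
// a limit of 0 means the chunk size never grows.
func chunkPlan(total, chunk, limit int) {
	for off := 0; off < total; {
		end := off + chunk
		if end > total {
			end = total
		}
		fmt.Printf("%dM-%dM ", off, end)
		off = end
		if limit > 0 { // limit 0: chunk size never grows
			chunk *= 2
			if chunk > limit {
				chunk = limit
			}
		}
	}
	fmt.Println()
}

func main() {
	chunkPlan(500, 100, 0)    // 0M-100M 100M-200M 200M-300M 300M-400M 400M-500M
	chunkPlan(1700, 100, 500) // 0M-100M 100M-300M 300M-700M 700M-1200M 1200M-1700M
}
```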
## VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects
@@ -357,11 +357,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -1,13 +1,13 @@
---
title: "rclone obscure"
description: "Obscure password for use in the rclone config file."
description: "Obscure password for use in the rclone config file"
slug: rclone_obscure
url: /commands/rclone_obscure/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
---
# rclone obscure
Obscure password for use in the rclone config file.
Obscure password for use in the rclone config file
## Synopsis

View File

@@ -196,11 +196,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -195,11 +195,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -267,11 +267,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -206,11 +206,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -275,11 +275,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
## VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -6,26 +6,23 @@ description: "Encryption overlay remote"
{{< icon "fa fa-lock" >}}Crypt
----------------------------------------
Rclone `crypt` remotes encrypt and decrypt other remotes.
The `crypt` remote encrypts and decrypts another remote.
To use `crypt`, first set up the underlying remote. Follow the `rclone
config` instructions for that remote.
To use it first set up the underlying remote following the config
instructions for that remote. You can also use a local pathname
instead of a remote which will encrypt and decrypt from that directory
which might be useful for encrypting onto a USB stick for example.
`crypt` applied to a local pathname instead of a remote will
encrypt and decrypt that directory, and can be used to encrypt USB
removable drives.
First check your chosen remote is working - we'll call it
`remote:path` in these docs. Note that anything inside `remote:path`
will be encrypted and anything outside won't. This means that if you
are using a bucket based remote (eg S3, B2, swift) then you should
probably put the bucket in the remote `s3:bucket`. If you just use
`s3:` then rclone will make encrypted bucket names too (if using file
name encryption) which may or may not be what you want.
Before configuring the crypt remote, check the underlying remote is
working. In this example the underlying remote is called `remote:path`.
Anything inside `remote:path` will be encrypted and anything outside
will not. In the case of an S3 based underlying remote (eg Amazon S3,
B2, Swift) it is generally advisable to define a crypt remote in the
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
file name encryption, rclone will encrypt the bucket name.
Configure `crypt` using `rclone config`. In this example the `crypt`
remote is called `secret`, to differentiate it from the underlying
`remote`.
Now configure `crypt` using `rclone config`. We will call this one
`secret` to differentiate it from the `remote`.
```
No remotes found - make a new one
@@ -99,42 +96,49 @@ d) Delete this remote
y/e/d> y
```
**Important** The crypt password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless encryption of `rclone.conf` is specified.
**Important** The password is stored in the config file is lightly
obscured so it isn't immediately obvious what it is. It is in no way
secure unless you use config file encryption.
A long passphrase is recommended, or `rclone config` can generate a
random one.
A long passphrase is recommended, or you can use a random one.
The obscured password is created using AES-CTR with a static key. The
salt is stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.
The obscured password is created by using AES-CTR with a static key, with
the salt stored verbatim at the beginning of the obscured password. This
static key is shared by between all versions of rclone.
If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.
Rclone does not encrypt
Note that rclone does not encrypt
* file length - this can be calculated within 16 bytes
* modification time - used for syncing
## Specifying the remote ##
In normal use, ensure the remote has a `:` in. If specified without,
rclone uses a local directory of that name. For example if a remote
`/path/to/secret/files` is specified, rclone encrypts content to that
directory. If a remote `name` is specified, rclone targets a directory
`name` in the current directory.
In normal use, make sure the remote has a `:` in. If you specify the
remote without a `:` then rclone will use a local directory of that
name. So if you use a remote of `/path/to/secret/files` then rclone
will encrypt stuff to that directory. If you use a remote of `name`
then rclone will put files in a directory called `name` in the current
directory.
If remote `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subpath` element is encrypted.
If you specify the remote as `remote:path/to/dir` then rclone will
store encrypted files in `path/to/dir` on the remote. If you are using
file name encryption, then when you save files to
`secret:subdir/subfile` this will store them in the unencrypted path
`path/to/dir` but the `subdir/subpath` bit will be encrypted.
Note that unless you want encrypted bucket names (which are difficult
to manage because you won't know what directory they represent in web
interfaces etc), you should probably specify a bucket, eg
`remote:secretbucket` when using bucket based remotes such as S3,
Swift, Hubic, B2, GCS.
## Example ##
Create the following file structure using "standard" file name
To test I made a little directory of files using "standard" file name
encryption.
```
@@ -148,7 +152,7 @@ plaintext/
└── file4.txt
```
Copy these to the remote, and list them
Copy these to the remote and list them back
```
$ rclone -q copy plaintext secret:
@@ -160,7 +164,7 @@ $ rclone -q ls secret:
9 subdir/file3.txt
```
The crypt remote looks like
Now see what that looked like when encrypted
```
$ rclone -q ls remote:path
@@ -171,7 +175,7 @@ $ rclone -q ls remote:path
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
```
The directory structure is preserved
Note that this retains the directory structure which means you can do this
```
$ rclone -q ls secret:subdir
@@ -180,9 +184,9 @@ $ rclone -q ls secret:subdir
10 subsubdir/file4.txt
```
Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.
If don't use file name encryption then the remote will look like this
- note the `.bin` extensions added to prevent the cloud provider
attempting to interpret the data.
```
$ rclone -q ls remote:path
@@ -195,6 +199,8 @@ $ rclone -q ls remote:path
### File name encryption modes ###
Here are some of the features of the file name encryption modes
Off
* doesn't hide file names or directory structure
@@ -213,19 +219,17 @@ Standard
Obfuscation
This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".
distance based on the filename. We store the distance at the beginning
of the filename. So a file called "hello" may become "53.jgnnq".
Obfuscation is not a strong encryption of filenames, but hinders
automated scanning tools picking up on filename patterns. It is an
intermediate between "off" and "standard" which allows for longer path
segment names.
This is not a strong encryption of filenames, but it may stop automated
scanning tools from picking up on filename patterns. As such it's an
intermediate between "off" and "standard". The advantage is that it
allows for longer path segment names.
There is a possibility with some unicode based filenames that the
obfuscation is weak and may map lower case characters to upper case
equivalents.
Obfuscation cannot be relied upon for strong protection.
equivalents. You can not rely on this for strong protection.
* file names very lightly obfuscated
* file names can be longer than standard encryption
@@ -233,14 +237,13 @@ Obfuscation cannot be relied upon for strong protection.
* directory structure visible
* identical files names will have identical uploaded names
Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less thn 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.
Cloud storage systems have various limits on file name length and
total path length which you are more likely to hit using "Standard"
file name encryption. If you keep your file names to below 156
characters in length then you should be OK on all providers.
An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.
There may be an even more secure file name encryption mode in the
future which will address the long file name problem.
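To make the rotate idea concrete, here is a toy sketch in the same spirit: it stores a name-derived distance as a numeric prefix and rotates the letters by it. This is NOT rclone's actual obfuscation algorithm, and its output for "hello" differs from the docs' "53.jgnnq":

```
package main

import "fmt"

// obfuscate derives a rotate distance from the name, stores it as a
// numeric prefix, and rotates lowercase letters by it (toy version).
func obfuscate(name string) string {
	dist := 0
	for _, r := range name {
		dist += int(r)
	}
	dist %= 26
	out := []rune(name)
	for i, r := range out {
		if r >= 'a' && r <= 'z' {
			out[i] = 'a' + (r-'a'+rune(dist))%26
		}
	}
	return fmt.Sprintf("%d.%s", dist, string(out))
}

func main() {
	fmt.Println(obfuscate("hello")) // 12.tqxxa
}
```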
### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -266,10 +269,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.
Hashes are not stored for crypt. However the data integrity is
Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.
Use the `rclone cryptcheck` command to check the
Note that you should use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.

View File

@@ -757,8 +757,6 @@ This can be useful for tracking down problems with syncs in
combination with the `-v` flag. See the [Logging section](#logging)
for more info.
If FILE exists then rclone will append to it.
Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
@@ -1253,17 +1251,11 @@ or with `--backup-dir`. See `--backup-dir` for more info.
For example
rclone copy -i /path/to/local/file remote:current --suffix .bak
rclone sync -i /path/to/local/file remote:current --suffix .bak
will copy `/path/to/local` to `remote:current`, but for any files
will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted have .bak added.
If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.
rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
### --suffix-keep-extension ###
When using `--suffix`, setting this causes rclone put the SUFFIX

View File

@@ -547,10 +547,8 @@ Here are the standard options specific to drive (Google Drive).
#### --drive-client-id
Google Application Client Id
Setting your own is recommended.
See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
If you leave this blank, it will use an internal key which is low performance.
OAuth Client Id
Leave blank normally.
- Config: client_id
- Env Var: RCLONE_DRIVE_CLIENT_ID

View File

@@ -147,7 +147,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.2")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
-v, --verbose count Print lots more stuff (repeat for more)
```
@@ -246,7 +246,7 @@ and may be set in the config file.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-auth-url string Auth server URL.
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-id string OAuth Client Id
--drive-client-secret string OAuth Client Secret
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding MultiEncoder This sets the encoding for the backend. (default InvalidUtf8)

View File

@@ -148,13 +148,8 @@ flag.
Note that Jottacloud requires the MD5 hash before upload so if the
source does not have an MD5 checksum then the file will be cached
temporarily on disk (wherever the `TMPDIR` environment variable points
to) before it is uploaded. Small files will be cached in memory - see
to) before it is uploaded. Small files will be cached in memory - see
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
true for crypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).
#### Restricted filename characters

View File

@@ -537,8 +537,6 @@ OR
"result": "<Raw command line output>"
}
```
**Authentication is required for this call.**
### core/gc: Runs a garbage collection. {#core-gc}
@@ -1214,7 +1212,7 @@ This allows you to remove a plugin using it's name
This takes parameters
- name: name of the plugin in the format `author`/`plugin_name`
- name: name of the plugin in the format <author>/<plugin_name>
Eg
@@ -1228,7 +1226,7 @@ This allows you to remove a plugin using it's name
This takes the following parameters
- name: name of the plugin in the format `author`/`plugin_name`
- name: name of the plugin in the format <author>/<plugin_name>
Eg

View File

@@ -18,7 +18,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
{{< /provider_list >}}
@@ -139,7 +138,7 @@ Choose a number from below, or type in your own value
/ Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
\ "ap-south-1"
/ Asia Pacific (Hong Kong) Region
/ Asia Patific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
\ "ap-east-1"
/ South America (Sao Paulo) Region
@@ -456,7 +455,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
### Standard Options
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
#### --s3-provider
@@ -487,8 +486,6 @@ Choose your S3 provider.
- Scaleway Object Storage
- "StackPath"
- StackPath Object Storage
- "TencentCOS"
- Tencent Cloud Object Storage (COS)
- "Wasabi"
- Wasabi Object Storage
- "Other"
@@ -545,12 +542,12 @@ Region to connect to.
- "us-east-2"
- US East (Ohio) Region
- Needs location constraint us-east-2.
- "us-west-1"
- US West (Northern California) Region
- Needs location constraint us-west-1.
- "us-west-2"
- US West (Oregon) Region
- Needs location constraint us-west-2.
- "us-west-1"
- US West (Northern California) Region
- Needs location constraint us-west-1.
- "ca-central-1"
- Canada (Central) Region
- Needs location constraint ca-central-1.
@@ -560,15 +557,9 @@ Region to connect to.
- "eu-west-2"
- EU (London) Region
- Needs location constraint eu-west-2.
- "eu-west-3"
- EU (Paris) Region
- Needs location constraint eu-west-3.
- "eu-north-1"
- EU (Stockholm) Region
- Needs location constraint eu-north-1.
- "eu-south-1"
- EU (Milan) Region
- Needs location constraint eu-south-1.
- "eu-central-1"
- EU (Frankfurt) Region
- Needs location constraint eu-central-1.
@@ -584,36 +575,15 @@ Region to connect to.
- "ap-northeast-2"
- Asia Pacific (Seoul)
- Needs location constraint ap-northeast-2.
- "ap-northeast-3"
- Asia Pacific (Osaka-Local)
- Needs location constraint ap-northeast-3.
- "ap-south-1"
- Asia Pacific (Mumbai)
- Needs location constraint ap-south-1.
- "ap-east-1"
- Asia Pacific (Hong Kong) Region
- Asia Patific (Hong Kong) Region
- Needs location constraint ap-east-1.
- "sa-east-1"
- South America (Sao Paulo) Region
- Needs location constraint sa-east-1.
- "me-south-1"
- Middle East (Bahrain) Region
- Needs location constraint me-south-1.
- "af-south-1"
- Africa (Cape Town) Region
- Needs location constraint af-south-1.
- "cn-north-1"
- China (Beijing) Region
- Needs location constraint cn-north-1.
- "cn-northwest-1"
- China (Ningxia) Region
- Needs location constraint cn-northwest-1.
- "us-gov-east-1"
- AWS GovCloud (US-East) Region
- Needs location constraint us-gov-east-1.
- "us-gov-west-1"
- AWS GovCloud (US) Region
- Needs location constraint us-gov-west-1.
#### --s3-region
@@ -869,54 +839,6 @@ Endpoint for StackPath Object Storage.
#### --s3-endpoint
Endpoint for Tencent COS API.
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
- Type: string
- Default: ""
- Examples:
- "cos.ap-beijing.myqcloud.com"
- Beijing Region.
- "cos.ap-nanjing.myqcloud.com"
- Nanjing Region.
- "cos.ap-shanghai.myqcloud.com"
- Shanghai Region.
- "cos.ap-guangzhou.myqcloud.com"
- Guangzhou Region.
- "cos.ap-nanjing.myqcloud.com"
- Nanjing Region.
- "cos.ap-chengdu.myqcloud.com"
- Chengdu Region.
- "cos.ap-chongqing.myqcloud.com"
- Chongqing Region.
- "cos.ap-hongkong.myqcloud.com"
- Hong Kong (China) Region.
- "cos.ap-singapore.myqcloud.com"
- Singapore Region.
- "cos.ap-mumbai.myqcloud.com"
- Mumbai Region.
- "cos.ap-seoul.myqcloud.com"
- Seoul Region.
- "cos.ap-bangkok.myqcloud.com"
- Bangkok Region.
- "cos.ap-tokyo.myqcloud.com"
- Tokyo Region.
- "cos.na-siliconvalley.myqcloud.com"
- Silicon Valley Region.
- "cos.na-ashburn.myqcloud.com"
- Virginia Region.
- "cos.na-toronto.myqcloud.com"
- Toronto Region.
- "cos.eu-frankfurt.myqcloud.com"
- Frankfurt Region.
- "cos.eu-moscow.myqcloud.com"
- Moscow Region.
- "cos.accelerate.myqcloud.com"
- Use Tencent COS Accelerate Endpoint.
#### --s3-endpoint
Endpoint for S3 API.
Required when using an S3 clone.
@@ -954,22 +876,18 @@ Used when creating buckets only.
- Empty for US Region, Northern Virginia or Pacific Northwest.
- "us-east-2"
- US East (Ohio) Region.
- "us-west-1"
- US West (Northern California) Region.
- "us-west-2"
- US West (Oregon) Region.
- "us-west-1"
- US West (Northern California) Region.
- "ca-central-1"
- Canada (Central) Region.
- "eu-west-1"
- EU (Ireland) Region.
- "eu-west-2"
- EU (London) Region.
- "eu-west-3"
- EU (Paris) Region.
- "eu-north-1"
- EU (Stockholm) Region.
- "eu-south-1"
- EU (Milan) Region.
- "EU"
- EU Region.
- "ap-southeast-1"
@@ -979,27 +897,13 @@ Used when creating buckets only.
- "ap-northeast-1"
- Asia Pacific (Tokyo) Region.
- "ap-northeast-2"
- Asia Pacific (Seoul) Region.
- "ap-northeast-3"
- Asia Pacific (Osaka-Local) Region.
- Asia Pacific (Seoul)
- "ap-south-1"
- Asia Pacific (Mumbai) Region.
- Asia Pacific (Mumbai)
- "ap-east-1"
- Asia Pacific (Hong Kong) Region.
- Asia Pacific (Hong Kong)
- "sa-east-1"
- South America (Sao Paulo) Region.
- "me-south-1"
- Middle East (Bahrain) Region.
- "af-south-1"
- Africa (Cape Town) Region.
- "cn-north-1"
- China (Beijing) Region
- "cn-northwest-1"
- China (Ningxia) Region.
- "us-gov-east-1"
- AWS GovCloud (US-East) Region.
- "us-gov-west-1"
- AWS GovCloud (US) Region.
#### --s3-location-constraint
@@ -1102,8 +1006,6 @@ doesn't copy the ACL from the source but rather writes a fresh one.
- Type: string
- Default: ""
- Examples:
- "default"
- Owner gets Full_CONTROL. No one else has access rights (default).
- "private"
- Owner gets FULL_CONTROL. No one else has access rights (default).
- "public-read"
@@ -1204,24 +1106,6 @@ The storage class to use when storing new objects in OSS.
#### --s3-storage-class
The storage class to use when storing new objects in Tencent COS.
- Config: storage_class
- Env Var: RCLONE_S3_STORAGE_CLASS
- Type: string
- Default: ""
- Examples:
- ""
- Default
- "STANDARD"
- Standard storage class
- "ARCHIVE"
- Archive storage mode.
- "STANDARD_IA"
- Infrequent access storage mode.
#### --s3-storage-class
The storage class to use when storing new objects in S3.
- Config: storage_class
@@ -1238,7 +1122,7 @@ The storage class to use when storing new objects in S3.
### Advanced Options
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
#### --s3-bucket-acl
@@ -1459,7 +1343,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.
@@ -2328,138 +2212,6 @@ d) Delete this remote
y/e/d> y
```
### Tencent COS {#tencent-cos}
[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.
To configure access to Tencent COS, follow the steps below:
1. Run `rclone config` and select `n` for a new remote.
```
rclone config
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```
2. Give the name of the configuration. For example, name it 'cos'.
```
name> cos
```
3. Select `s3` storage.
```
Choose a number from below, or type in your own value
1 / 1Fichier
\ "fichier"
2 / Alias for an existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
\ "s3"
[snip]
Storage> s3
```
4. Select `TencentCOS` provider.
```
Choose a number from below, or type in your own value
1 / Amazon Web Services (AWS) S3
\ "AWS"
[snip]
11 / Tencent Cloud Object Storage (COS)
\ "TencentCOS"
[snip]
provider> TencentCOS
```
5. Enter your SecretId and SecretKey of Tencent Cloud.
```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
\ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
\ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```
6. Select the endpoint for Tencent COS. This is the standard endpoint for the chosen region.
```
1 / Beijing Region.
\ "cos.ap-beijing.myqcloud.com"
2 / Nanjing Region.
\ "cos.ap-nanjing.myqcloud.com"
3 / Shanghai Region.
\ "cos.ap-shanghai.myqcloud.com"
4 / Guangzhou Region.
\ "cos.ap-guangzhou.myqcloud.com"
[snip]
endpoint> 4
```
7. Choose acl and storage class.
```
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Owner gets Full_CONTROL. No one else has access rights (default).
\ "default"
[snip]
acl> 1
The storage class to use when storing new objects in Tencent COS.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Default
\ ""
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[cos]
type = s3
provider = TencentCOS
env_auth = false
access_key_id = xxx
secret_access_key = xxx
endpoint = cos.ap-guangzhou.myqcloud.com
acl = default
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name                 Type
====                 ====
cos                  s3
```
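Once the remote is configured it behaves like any other rclone S3 remote. For example (bucket name illustrative):

```
rclone mkdir cos:my-bucket
rclone copy /home/local/directory cos:my-bucket
rclone ls cos:my-bucket
```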
### Netease NOS ###
For Netease NOS, configure as usual via the `rclone config` wizard.

View File

@@ -1 +1 @@
v1.53.2
v1.54.0

View File

@@ -272,7 +272,7 @@ func (s *StatsInfo) String() string {
}
}
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -283,7 +283,6 @@ func (s *StatsInfo) String() string {
)
if !fs.Config.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""
switch {
case s.fatalError:
@@ -292,7 +291,6 @@ func (s *StatsInfo) String() string {
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"
}
// Add only non zero stats

View File

@@ -366,8 +366,6 @@ func (sg *statsGroups) sum() *StatsInfo {
sum.lastError = stats.lastError
}
sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
sum.oldDuration += stats.oldDuration
sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
}
stats.mu.RUnlock()
}

View File

@@ -4,10 +4,8 @@ import (
"fmt"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
)
func TestStatsGroupOperations(t *testing.T) {
@@ -45,26 +43,17 @@ func TestStatsGroupOperations(t *testing.T) {
t.Parallel()
stats1 := NewStats()
stats1.bytes = 5
stats1.errors = 6
stats1.oldDuration = time.Second
stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
stats1.errors = 5
stats2 := NewStats()
stats2.bytes = 10
stats2.errors = 12
stats2.oldDuration = 2 * time.Second
stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
sg := newStatsGroups()
sg.set("test1", stats1)
sg.set("test2", stats2)
sum := sg.sum()
assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
// dict can iterate in either order
a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
assert.Equal(t, b, sum.oldTimeRanges)
if sum.bytes != stats1.bytes+stats2.bytes {
t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
}
if sum.errors != stats1.errors+stats2.errors {
t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
}
})

View File

@@ -72,16 +72,8 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
for _, tr := range tm.items {
s = append(s, tr)
}
// sort by time first and if equal by name. Note that the relatively
// low time resolution on Windows can cause equal times.
sort.Slice(s, func(i, j int) bool {
a, b := s[i], s[j]
if a.startedAt.Before(b.startedAt) {
return true
} else if !a.startedAt.Equal(b.startedAt) {
return false
}
return a.remote < b.remote
return s[i].startedAt.Before(s[j].startedAt)
})
return s
}
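The comment in the longer variant makes the rationale explicit: Windows timestamps are coarse enough that two transfers often share a start time, so the sort needs a secondary key to stay deterministic. A standalone sketch of the same two-key comparator (generic types, not rclone's):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type transfer struct {
	remote    string
	startedAt time.Time
}

func main() {
	t0 := time.Now()
	s := []transfer{{"b", t0}, {"a", t0}, {"c", t0.Add(-time.Second)}}
	// Compare by start time first; fall back to name when the
	// timestamps collide, so the order is always deterministic.
	sort.Slice(s, func(i, j int) bool {
		a, b := s[i], s[j]
		if !a.startedAt.Equal(b.startedAt) {
			return a.startedAt.Before(b.startedAt)
		}
		return a.remote < b.remote
	})
	fmt.Println(s) // "c" first, then "a" before "b" by name
}
```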

View File

@@ -172,12 +172,9 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
return false
}
if ht == hash.None {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
if common.Count() == 0 {
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
}
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
fs.Debugf(src, "Size of src and dst objects identical")
} else {
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
@@ -1525,11 +1522,12 @@ func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err
}
}
}
} else if fs.Config.Suffix != "" {
} else {
if srcFileName == "" {
return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
}
// --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst
} else {
return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
}
if !CanServerSideMove(backupDir) {
return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))

View File

@@ -378,8 +378,8 @@ OR
"error": true,
"result": "<Raw command line output>"
}
` + "```" + `
`,
})
}

View File

@@ -45,7 +45,7 @@ func init() {
This takes the following parameters
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
- name: name of the plugin in the format <author>/<plugin_name>
Eg
@@ -212,7 +212,7 @@ func init() {
This takes parameters
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
- name: name of the plugin in the format <author>/<plugin_name>
Eg

View File

@@ -1590,7 +1590,7 @@ func TestSyncCopyDest(t *testing.T) {
}
// Test with BackupDir set
func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
r := fstest.NewRun(t)
defer r.Finalise()
@@ -1599,23 +1599,7 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
}
r.Mkdir(context.Background(), r.Fremote)
if backupDir != "" {
fs.Config.BackupDir = r.FremoteName + "/" + backupDir
backupDir += "/"
} else {
fs.Config.BackupDir = ""
backupDir = "dst/"
// Exclude the suffix from the sync otherwise the sync
// deletes the old backup files
flt, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, flt.AddRule("- *"+suffix))
oldFlt := filter.Active
filter.Active = flt
defer func() {
filter.Active = oldFlt
}()
}
fs.Config.BackupDir = r.FremoteName + "/backup"
fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
defer func() {
@@ -1643,14 +1627,14 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
file1.Path = backupDir + "one" + suffix
file1.Path = "backup/one" + suffix
file1a.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3.Path = backupDir + "three" + suffix + ".txt"
file3.Path = "backup/three" + suffix + ".txt"
} else {
file3.Path = backupDir + "three.txt" + suffix
file3.Path = "backup/three.txt" + suffix
}
fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)
@@ -1668,29 +1652,22 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
file1a.Path = backupDir + "one" + suffix
file1a.Path = "backup/one" + suffix
file1b.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3a.Path = backupDir + "three" + suffix + ".txt"
file3a.Path = "backup/three" + suffix + ".txt"
} else {
file3a.Path = backupDir + "three.txt" + suffix
file3a.Path = "backup/three.txt" + suffix
}
fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) {
testSyncBackupDir(t, "backup", "", false)
}
func TestSyncBackupDirWithSuffix(t *testing.T) {
testSyncBackupDir(t, "backup", ".bak", false)
}
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "", false) }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
testSyncBackupDir(t, "backup", "-2019-01-01", true)
}
func TestSyncBackupDirSuffixOnly(t *testing.T) {
testSyncBackupDir(t, "", ".bak", false)
testSyncBackupDir(t, "-2019-01-01", true)
}
// Test with Suffix set

View File

@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.53.2-DEV"
var Version = "v1.54.0-DEV"

View File

@@ -27,16 +27,15 @@ type Test struct {
//
// FIXME make bucket based remotes set sub-dir automatically???
type Backend struct {
Backend string // name of the backend directory
Remote string // name of the test remote
FastList bool // set to test with -fast-list
Short bool // set to test with -short
OneOnly bool // set to run only one backend test at once
MaxFile string // file size limit
CleanUp bool // when running clean, run cleanup first
Ignore []string // test names to ignore the failure of
Tests []string // paths of tests to run, blank for all
ListRetries int // -list-retries if > 0
Backend string // name of the backend directory
Remote string // name of the test remote
FastList bool // set to test with -fast-list
Short bool // set to test with -short
OneOnly bool // set to run only one backend test at once
MaxFile string // file size limit
CleanUp bool // when running clean, run cleanup first
Ignore []string // test names to ignore the failure of
Tests []string // paths of tests to run, blank for all
}
// includeTest returns true if this backend should be included in this
@@ -80,17 +79,16 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
continue
}
run := &Run{
Remote: b.Remote,
Backend: b.Backend,
Path: t.Path,
FastList: fastlist,
Short: (b.Short && t.Short),
NoRetries: t.NoRetries,
OneOnly: b.OneOnly,
NoBinary: t.NoBinary,
SizeLimit: int64(maxSize),
Ignore: ignore,
ListRetries: b.ListRetries,
Remote: b.Remote,
Backend: b.Backend,
Path: t.Path,
FastList: fastlist,
Short: (b.Short && t.Short),
NoRetries: t.NoRetries,
OneOnly: b.OneOnly,
NoBinary: t.NoBinary,
SizeLimit: int64(maxSize),
Ignore: ignore,
}
if t.AddBackend {
run.Path = path.Join(run.Path, b.Backend)

View File

@@ -20,7 +20,6 @@ backends:
- backend: "b2"
remote: "TestB2:"
fastlist: true
listretries: 5
- backend: "crypt"
remote: "TestCryptDrive:"
fastlist: true
@@ -42,13 +41,15 @@ backends:
remote: "TestChunkerChunk3bNometaLocal:"
fastlist: true
maxfile: 6k
- backend: "chunker"
remote: "TestChunkerMailru:"
fastlist: true
- backend: "chunker"
remote: "TestChunkerChunk50bMailru:"
fastlist: true
maxfile: 10k
# Disable chunker with mailru tests until Mailru is fixed - see
# https://github.com/rclone/rclone/issues/4376
# - backend: "chunker"
# remote: "TestChunkerMailru:"
# fastlist: true
# - backend: "chunker"
# remote: "TestChunkerChunk50bMailru:"
# fastlist: true
# maxfile: 10k
- backend: "chunker"
remote: "TestChunkerChunk50bYandex:"
fastlist: true
@@ -72,10 +73,6 @@ backends:
remote: "TestChunkerChunk50bSHA1HashS3:"
fastlist: true
maxfile: 1k
- backend: "chunker"
remote: "TestChunkerOverCrypt:"
fastlist: true
maxfile: 6k
- backend: "chunker"
remote: "TestChunkerChunk50bMD5QuickS3:"
fastlist: true
@@ -147,12 +144,12 @@ backends:
# ignore:
# - TestIntegration/FsMkdir/FsPutFiles/FsCopy
# - TestIntegration/FsMkdir/FsPutFiles/SetTier
# - backend: "s3"
# remote: "TestS3Ceph:"
# fastlist: true
# ignore:
# - TestIntegration/FsMkdir/FsPutFiles/FsCopy
# - TestIntegration/FsMkdir/FsPutFiles/SetTier
- backend: "s3"
remote: "TestS3Ceph:"
fastlist: true
ignore:
- TestIntegration/FsMkdir/FsPutFiles/FsCopy
- TestIntegration/FsMkdir/FsPutFiles/SetTier
- backend: "s3"
remote: "TestS3Alibaba:"
fastlist: true
@@ -173,11 +170,11 @@ backends:
- backend: "swift"
remote: "TestSwift:"
fastlist: true
# - backend: "swift"
# remote: "TestSwiftCeph:"
# fastlist: true
# ignore:
# - TestIntegration/FsMkdir/FsPutFiles/FsCopy
- backend: "swift"
remote: "TestSwiftCeph:"
fastlist: true
ignore:
- TestIntegration/FsMkdir/FsPutFiles/FsCopy
- backend: "yandex"
remote: "TestYandex:"
fastlist: false

View File

@@ -35,17 +35,16 @@ var (
// if retries are needed.
type Run struct {
// Config
Remote string // name of the test remote
Backend string // name of the backend
Path string // path to the source directory
FastList bool // add -fast-list to tests
Short bool // add -short
NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary
SizeLimit int64 // maximum test file size
Ignore map[string]struct{}
ListRetries int // -list-retries if > 0
Remote string // name of the test remote
Backend string // name of the backend
Path string // path to the source directory
FastList bool // add -fast-list to tests
Short bool // add -short
NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary
SizeLimit int64 // maximum test file size
Ignore map[string]struct{}
// Internals
CmdLine []string
CmdString string
@@ -337,12 +336,8 @@ func (r *Run) Init() {
r.CmdLine = []string{"./" + r.BinaryName()}
}
r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
listRetries := *listRetries
if r.ListRetries > 0 {
listRetries = r.ListRetries
}
if listRetries > 0 {
r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
if *listRetries > 0 {
r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(*listRetries))
}
r.Try = 1
if *verbose {

rclone.1 generated

File diff suppressed because it is too large

View File

@@ -342,14 +342,9 @@ func (f *File) Size() int64 {
}
// SetModTime sets the modtime for the file
//
// if NoModTime is set then it does nothing
func (f *File) SetModTime(modTime time.Time) error {
f.mu.Lock()
defer f.mu.Unlock()
if f.d.vfs.Opt.NoModTime {
return nil
}
if f.d.vfs.Opt.ReadOnly {
return EROFS
}
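The `NoModTime` branch (present on one side of this diff) makes time-setting calls succeed as harmless no-ops; the option is normally switched on when mounting, e.g. (mount point illustrative):

```
rclone mount remote: /mnt/point --no-modtime
```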

View File

@@ -166,11 +166,6 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
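As a concrete starting point, the flags discussed above might be combined like this (values illustrative, tune to taste):

```
rclone mount remote: /mnt/point --vfs-cache-mode full --vfs-read-ahead 256M --buffer-size 16M
```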
### VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -466,15 +466,9 @@ func (c *Cache) retryFailedResets() {
if len(c.errItems) != 0 {
fs.Debugf(nil, "vfs cache reset: before redoing reset errItems = %v", c.errItems)
for itemName := range c.errItems {
if retryItem, ok := c.item[itemName]; ok {
_, _, err := retryItem.Reset()
if err == nil || !fserrors.IsErrNoSpace(err) {
// TODO: not trying to handle non-ENOSPC errors yet
delete(c.errItems, itemName)
}
} else {
// The retry item was deleted because it was closed.
// No need to redo the failed reset now.
_, _, err := c.item[itemName].Reset()
if err == nil || !fserrors.IsErrNoSpace(err) {
// TODO: not trying to handle non-ENOSPC errors yet
delete(c.errItems, itemName)
}
}
@@ -609,7 +603,7 @@ func (c *Cache) clean(removeCleanFiles bool) {
if os.IsNotExist(err) {
return
}
c.updateUsed()
c.mu.Lock()
oldItems, oldUsed := len(c.item), fs.SizeSuffix(c.used)
c.mu.Unlock()

View File

@@ -230,11 +230,7 @@ func (dls *Downloaders) Close(inErr error) (err error) {
}
}
dls.cancel()
// dls may have entered the periodical (every 5 seconds) kickWaiters() call
// unlock the mutex to allow it to finish so that we can get its dls.wg.Done()
dls.mu.Unlock()
dls.wg.Wait()
dls.mu.Lock()
dls.dls = nil
dls._dispatchWaiters()
dls._closeWaiters(inErr)
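The comment describes a classic deadlock-avoidance rule: never hold a mutex across `WaitGroup.Wait` when the goroutine being waited for may need that same mutex to exit. A self-contained sketch of the pattern (generic code, not rclone's):

```go
package main

import (
	"sync"
	"time"
)

// worker periodically takes mu, like the downloaders' kickWaiters loop.
func worker(mu *sync.Mutex, wg *sync.WaitGroup, stop chan struct{}) {
	defer wg.Done()
	for {
		select {
		case <-stop:
			return
		case <-time.After(10 * time.Millisecond):
			mu.Lock()
			// ... periodic work under the lock ...
			mu.Unlock()
		}
	}
}

func main() {
	var (
		mu   sync.Mutex
		wg   sync.WaitGroup
		stop = make(chan struct{})
	)
	wg.Add(1)
	go worker(&mu, &wg, stop)

	mu.Lock()
	close(stop) // ask the worker to exit
	// Waiting here with mu held could deadlock: the worker may be
	// blocked on mu.Lock. Release the mutex around the Wait.
	mu.Unlock()
	wg.Wait()
	mu.Lock()
	// ... finish teardown under the lock ...
	mu.Unlock()
}
```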

View File

@@ -43,13 +43,6 @@ import (
// be taken before Item.mu. writeback may call into Item but Item may
// **not** call writeback methods with Item.mu held
// Item reset is invoked by cache cleaner for synchronous recovery
// from ENOSPC errors. The reset operation removes the cache file and
// closes/reopens the downloaders. Although most parts of reset and
// other item operations are done with the item mutex held, the mutex
// is released during fd.WriteAt and downloaders calls. We use preAccess
// and postAccess calls to serialize reset and other item operations.
// Item is stored in the item map
//
// The Info field is written to the backing store to store status
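The serialization described in the longer comment can be pictured as a reader-writer gate: ordinary item operations bracket themselves with preAccess/postAccess while a reset runs exclusively. A hypothetical sketch of that shape (illustrative only, not rclone's actual implementation):

```go
package main

import "sync"

// accessGate serializes a heavyweight reset against normal operations,
// in the spirit of the preAccess/postAccess calls mentioned above.
type accessGate struct {
	mu sync.RWMutex
}

// Many operations may run concurrently between preAccess and postAccess.
func (g *accessGate) preAccess()  { g.mu.RLock() }
func (g *accessGate) postAccess() { g.mu.RUnlock() }

// withReset runs fn exclusively: it waits for in-flight operations to
// call postAccess and blocks new ones until fn returns.
func (g *accessGate) withReset(fn func()) {
	g.mu.Lock()
	defer g.mu.Unlock()
	fn()
}

func main() {
	var g accessGate
	g.preAccess()
	// ... read/write the cache file ...
	g.postAccess()
	g.withReset(func() {
		// ... remove the cache file, reopen the downloaders ...
	})
}
```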
@@ -246,23 +239,8 @@ func (item *Item) _truncate(size int64) (err error) {
// Use open handle if available
fd := item.fd
if fd == nil {
// If the metadata says we have some blocks cached then the
// file should exist, so open without O_CREATE
oFlags := os.O_WRONLY
if item.info.Rs.Size() == 0 {
oFlags |= os.O_CREATE
}
osPath := item.c.toOSPath(item.name) // No locking in Cache
fd, err = file.OpenFile(osPath, oFlags, 0600)
if err != nil && os.IsNotExist(err) {
// If the metadata has info but the file doesn't
// exist then it has been externally removed
fs.Errorf(item.name, "vfs cache: detected external removal of cache file")
item.info.Rs = nil // show we have no blocks cached
item.info.Dirty = false // file can't be dirty if it doesn't exist
item._removeMeta("cache file externally deleted")
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
}
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
}
@@ -271,7 +249,7 @@ func (item *Item) _truncate(size int64) (err error) {
err = file.SetSparse(fd)
if err != nil {
fs.Errorf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
fs.Debugf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
}
}
@@ -317,8 +295,6 @@ func (item *Item) _truncateToCurrentSize() (err error) {
// extended and the extended data will be filled with zeros. The
// object will be marked as dirty in this case also.
func (item *Item) Truncate(size int64) (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
defer item.mu.Unlock()
@@ -438,8 +414,6 @@ func (item *Item) _dirty() {
// Dirty marks the item as changed and needing writeback
func (item *Item) Dirty() {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
item._dirty()
item.mu.Unlock()
@@ -472,7 +446,7 @@ func (item *Item) _createFile(osPath string) (err error) {
}
err = file.SetSparse(fd)
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
fs.Debugf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
}
item.fd = fd
@@ -623,8 +597,6 @@ func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {
// Close the cache file
func (item *Item) Close(storeFn StoreFn) (err error) {
// defer log.Trace(item.o, "Item.Close")("err=%v", &err)
item.preAccess()
defer item.postAccess()
var (
downloaders *downloaders.Downloaders
syncWriteBack = item.c.opt.WriteBack <= 0
@@ -1182,7 +1154,6 @@ func (item *Item) setModTime(modTime time.Time) {
// ReadAt bytes from the file at off
func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
n = 0
var expBackOff int
for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
item.preAccess()
n, err = item.readAt(b, off)
@@ -1196,12 +1167,6 @@ func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
break
}
item.c.KickCleaner()
expBackOff = 2 << uint(retries)
time.Sleep(time.Duration(expBackOff) * time.Millisecond) // Exponential back-off the retries
}
if fserrors.IsErrNoSpace(err) {
fs.Errorf(item.name, "vfs cache: failed to _ensure cache after retries %v", err)
}
return n, err
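For reference, `2 << uint(retries)` doubles from 2 ms upwards: retries 0, 1, 2, 3 sleep 2, 4, 8, 16 ms. A trivial standalone check:

```go
package main

import "fmt"

func main() {
	// Backoff schedule produced by 2 << uint(retries):
	// 2ms, 4ms, 8ms, 16ms, ... doubling per retry.
	for retries := 0; retries < 5; retries++ {
		fmt.Printf("retry %d: sleep %dms\n", retries, 2<<uint(retries))
	}
}
```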
@@ -1233,8 +1198,6 @@ func (item *Item) readAt(b []byte, off int64) (n int, err error) {
// WriteAt bytes to the file at off
func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
if item.fd == nil {
item.mu.Unlock()
@@ -1325,8 +1288,6 @@ func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, e
// this means flushing the file system's in-memory copy of recently written
// data to disk.
func (item *Item) Sync() (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
defer item.mu.Unlock()
if item.fd == nil {
@@ -1346,8 +1307,6 @@ func (item *Item) Sync() (err error) {
// rename the item
func (item *Item) rename(name string, newName string, newObj fs.Object) (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
// stop downloader
@@ -1377,5 +1336,6 @@ func (item *Item) rename(name string, newName string, newObj fs.Object) (err err
_ = downloaders.Close(nil)
}
item.c.writeback.Rename(id, newName)
return err
}