Mirror of https://github.com/rclone/rclone.git (synced 2025-12-17 08:43:19 +00:00)

Compare commits: 114 commits, fix-5107-a ... fix-connec
.github/ISSUE_TEMPLATE/Bug.md (vendored, 29 changed lines)

@@ -5,19 +5,31 @@ about: Report a problem with rclone
 <!--
 
-Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
+We understand you are having a problem with rclone; we want to help you with that!
 
-If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
+**STOP and READ**
+
+**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
+
+Please show the effort you've put in to solving the problem and please be specific.
+
+People are volunteering their time to help! Low effort posts are not likely to get good answers!
+
+If you think you might have found a bug, try to replicate it with the latest beta (or stable).
+
+The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
+
+If you can still replicate it or just got a question then please use the rclone forum:
 
 https://forum.rclone.org/
 
-instead of filing an issue for a quick response.
+for a quick response instead of filing an issue on this repo.
 
-If you think you might have found a bug, please can you try to replicate it with the latest beta?
+If nothing else helps, then please fill in the info below which helps us help you.
 
-https://beta.rclone.org/
+**DO NOT REDACT** any information except passwords/keys/personal info.
 
-If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
+You should use 3 backticks to begin and end your paste to make it readable.
+
+Make sure to include a log obtained with '-vv'.
+
+You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
 
 Thank you
 
@@ -25,6 +37,11 @@ The Rclone Developers
 -->
 
 
+
+#### The associated forum post URL from `https://forum.rclone.org`
+
+
+
 #### What is the problem you are having with rclone?
.github/ISSUE_TEMPLATE/Feature.md (vendored, 16 changed lines)

@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone
 Welcome :-)
 
-So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
+So you've got an idea to improve rclone? We love that!
+You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
 
-Here is a checklist of things to do:
+Probably the latest beta (or stable) release has your feature, so try to update your rclone.
+The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
 
-1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
-2. Discuss on the forum first: https://forum.rclone.org/
+If it still isn't there, here is a checklist of things to do:
+
+1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
+2. Discuss on the forum: https://forum.rclone.org/
 3. Make a feature request issue (this is the right place!).
 4. Be prepared to get involved making the feature :-)
 
@@ -23,6 +27,10 @@ The Rclone Developers
 -->
 
 
+
+#### The associated forum post URL from `https://forum.rclone.org`
+
+
+
 #### What is your current rclone version (output from `rclone version`)?
.github/workflows/build.yml (vendored, 104 changed lines)

@@ -213,46 +213,94 @@ jobs:
     # Deploy binaries if enabled in config && not a PR && not a fork
     if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
-  xgo:
-    timeout-minutes: 60
-    name: "xgo cross compile"
+  android:
+    timeout-minutes: 30
+    name: "android-all"
     runs-on: ubuntu-latest
 
     steps:
 
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
+
+      # Upgrade together with NDK version
+      - name: Set up Go 1.14
+        uses: actions/setup-go@v1
         with:
-          # Checkout into a fixed path to avoid import path problems on go < 1.11
-          path: ./src/github.com/rclone/rclone
+          go-version: 1.14
 
-      - name: Set environment variables
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
+
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Set global environment variables
         shell: bash
         run: |
-          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
-          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
+          echo "VERSION=$(make version)" >> $GITHUB_ENV
 
-      - name: Cross-compile rclone
-        run: |
-          docker pull billziss/xgo-cgofuse
-          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
-          # xgo \
-          #     -image=billziss/xgo-cgofuse \
-          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-          #     -tags cmount \
-          #     -dest build \
-          #     .
-          xgo \
-              -image=billziss/xgo-cgofuse \
-              -targets=android/* \
-              -dest build \
-              .
-
-      - name: Build rclone
-        shell: bash
+      - name: build native rclone
         run: |
           make
 
+      - name: arm-v7a Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=arm' >> $GITHUB_ENV
+          echo 'GOARM=7' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: arm-v7a build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
+
+      - name: arm64-v8a Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=arm64' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: arm64-v8a build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
+
+      - name: x86 Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=386' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: x86 build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
+
+      - name: x64 Set environment variables
+        shell: bash
+        run: |
+          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+          echo 'GOOS=android' >> $GITHUB_ENV
+          echo 'GOARCH=amd64' >> $GITHUB_ENV
+          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+
+      - name: x64 build
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
+
       - name: Upload artifacts
         run: |
           make ci_upload
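Each per-ABI step above bakes the release version into the binary with `go build -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION}`. As a reminder of how the `-X` linker flag behaves, here is a tiny stand-alone sketch; the `main.Version` variable is invented for the illustration and is not rclone code.

```go
// version_demo.go: a minimal sketch of a link-time variable override.
package main

import "fmt"

// Version has a default the linker can replace, e.g.:
//   go build -ldflags "-X main.Version=v1.55.0-beta" -o demo .
var Version = "dev"

func main() {
	fmt.Println("built version:", Version)
}
```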
@@ -33,10 +33,11 @@ page](https://github.com/rclone/rclone).
 
 Now in your terminal
 
-    go get -u github.com/rclone/rclone
-    cd $GOPATH/src/github.com/rclone/rclone
+    git clone https://github.com/rclone/rclone.git
+    cd rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
+    go build
 
 Make a branch to add your new feature
MANUAL.html (generated, 1277 changed lines): file diff suppressed because it is too large

MANUAL.txt (generated, 1849 changed lines): file diff suppressed because it is too large
Makefile (6 changed lines)

@@ -187,10 +187,10 @@ upload_github:
 	./bin/upload-github $(TAG)
 
 cross: doc
-	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 
 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
 
@@ -198,7 +198,7 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 
 ci_upload:
 	sudo chown -R $$USER build
RELEASE.md (18 changed lines)

@@ -76,6 +76,24 @@ Now
 The rclone docker image should autobuild on via GitHub actions. If it doesn't
 or needs to be updated then rebuild like this.
 
+See: https://github.com/ilteoood/docker_buildx/issues/19
+See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
+
+```
+git co v1.54.1
+docker pull golang
+export DOCKER_CLI_EXPERIMENTAL=enabled
+docker buildx create --name actions_builder --use
+docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
+docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
+echo "Supported platforms: $SUPPORTED_PLATFORMS"
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx stop actions_builder
+```
+
+### Old build for linux/amd64 only
+
 ```
 docker pull golang
 docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
@@ -217,6 +217,23 @@ This option controls how often unused buffers will be removed from the pool.`,
 			encoder.EncodeDel |
 			encoder.EncodeBackSlash |
 			encoder.EncodeRightPeriod),
+		}, {
+			Name:    "public_access",
+			Help:    "Public access level of a container: blob, container.",
+			Default: string(azblob.PublicAccessNone),
+			Examples: []fs.OptionExample{
+				{
+					Value: string(azblob.PublicAccessNone),
+					Help:  "The container and its blobs can be accessed only with an authorized request. It's a default value",
+				}, {
+					Value: string(azblob.PublicAccessBlob),
+					Help:  "Blob data within this container can be read via anonymous request.",
+				}, {
+					Value: string(azblob.PublicAccessContainer),
+					Help:  "Allow full public read access for container and blob data.",
+				},
+			},
+			Advanced: true,
 		}},
 	})
 }
@@ -241,6 +258,7 @@ type Options struct {
 	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
 	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
 	Enc                 encoder.MultiEncoder `config:"encoding"`
+	PublicAccess        string               `config:"public_access"`
 }
 
 // Fs represents a remote azure server
@@ -262,6 +280,7 @@ type Fs struct {
 	imdsPacer    *fs.Pacer               // Same but for IMDS
 	uploadToken  *pacer.TokenDispenser   // control concurrency
 	pool         *pool.Pool              // memory pool
+	publicAccess azblob.PublicAccessType // Container Public Access Level
 }
 
 // Object describes an azure object
@@ -335,6 +354,19 @@ func validateAccessTier(tier string) bool {
 	}
 }
 
+// validatePublicAccess checks if azureblob supports use supplied public access level
+func validatePublicAccess(publicAccess string) bool {
+	switch publicAccess {
+	case string(azblob.PublicAccessNone),
+		string(azblob.PublicAccessBlob),
+		string(azblob.PublicAccessContainer):
+		// valid cases
+		return true
+	default:
+		return false
+	}
+}
+
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
 	401, // Unauthorized (e.g. "Token has expired")
@@ -502,6 +534,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}
 
+	if !validatePublicAccess((opt.PublicAccess)) {
+		return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
+			string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
+	}
+
 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
@@ -520,6 +557,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			opt.MemoryPoolUseMmap,
 		),
 	}
+	f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
 	f.imdsPacer.SetRetries(5) // per IMDS documentation
 	f.setRoot(root)
 	f.features = (&fs.Features{
@@ -1084,7 +1122,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 	}
 	// now try to create the container
 	return f.pacer.Call(func() (bool, error) {
-		_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
+		_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
 		if err != nil {
 			if storageErr, ok := err.(azblob.StorageError); ok {
 				switch storageErr.ServiceCode() {
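For readers skimming the azureblob hunks above: the new public_access option is validated in NewFs, stored on the Fs, and finally handed to container creation in makeContainer instead of the hard-coded PublicAccessNone. The sketch below mirrors that flow; PublicAccessType and its constants here are local stand-ins for the azblob SDK types, not the backend's actual code.

```go
// A simplified, self-contained sketch of the public_access flow.
package main

import "fmt"

type PublicAccessType string

const (
	PublicAccessNone      PublicAccessType = ""
	PublicAccessBlob      PublicAccessType = "blob"
	PublicAccessContainer PublicAccessType = "container"
)

// validatePublicAccess mirrors the switch added in the diff: only the
// three levels understood by Azure Blob Storage are accepted.
func validatePublicAccess(publicAccess string) bool {
	switch publicAccess {
	case string(PublicAccessNone), string(PublicAccessBlob), string(PublicAccessContainer):
		return true
	default:
		return false
	}
}

func main() {
	for _, level := range []string{"", "blob", "container", "bogus"} {
		if !validatePublicAccess(level) {
			fmt.Printf("%q would be rejected by NewFs\n", level)
			continue
		}
		// NewFs stores the validated value on the Fs; makeContainer then
		// passes it to Create in place of the previous PublicAccessNone.
		fmt.Printf("%q would be passed to container Create\n", PublicAccessType(level))
	}
}
```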
@@ -1744,6 +1744,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		ContentType: resp.Header.Get("Content-Type"),
 		Info:        Info,
 	}
+	// When reading files from B2 via cloudflare using
+	// --b2-download-url cloudflare strips the Content-Length
+	// headers (presumably so it can inject stuff) so use the old
+	// length read from the listing.
+	if info.Size < 0 {
+		info.Size = o.size
+	}
 	return resp, info, nil
 }
 
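The new fallback works because Go's net/http reports an unknown Content-Length as a size of -1. A minimal, self-contained illustration of that convention (independent of the b2 backend and of Cloudflare) is below.

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// A response with no Content-Length header, as seen when a proxy
	// strips it and streams the body with chunked encoding instead.
	raw := "HTTP/1.1 200 OK\r\n" +
		"Transfer-Encoding: chunked\r\n" +
		"\r\n" +
		"5\r\nhello\r\n0\r\n\r\n"

	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// net/http reports the unknown length as -1, which is exactly the
	// `info.Size < 0` case the diff falls back from.
	fmt.Println("ContentLength:", resp.ContentLength)
}
```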
@@ -323,7 +323,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 	}
 	authRetry := false
 
-	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
+	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
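The rewritten check is shorter and more tolerant: Header.Get canonicalises the key and returns an empty string when the header is absent, so the explicit len(...) == 1 guard and the slice index are no longer needed. A stand-alone illustration (not backend code):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	h := http.Header{}
	h.Set("WWW-Authenticate", `Bearer error="expired_token"`)

	// Get canonicalises the key and returns "" for a missing header,
	// so no length check on the underlying slice is required.
	fmt.Println(strings.Contains(h.Get("Www-Authenticate"), "expired_token")) // true

	var empty http.Header
	fmt.Println(strings.Contains(empty.Get("Www-Authenticate"), "expired_token")) // false
}
```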
@@ -277,13 +277,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		return nil, errors.New("can't point remote at itself - check the value of the remote setting")
 	}
 
-	baseName, basePath, err := fspath.Parse(remote)
+	baseName, basePath, err := fspath.SplitFs(remote)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 	}
-	if baseName != "" {
-		baseName += ":"
-	}
 	// Look for a file first
 	remotePath := fspath.JoinRootPath(basePath, rpath)
 	baseFs, err := cache.Get(ctx, baseName+remotePath)
@@ -404,7 +404,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to read destination hash")
 	}
-	if srcHash != "" && dstHash != "" && srcHash != dstHash {
+	if srcHash != "" && dstHash != "" {
+		if srcHash != dstHash {
 			// remove object
 			err = o.Remove(ctx)
 			if err != nil {
@@ -412,6 +413,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 			}
 			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
 		}
+		fs.Debugf(src, "%v = %s OK", ht, srcHash)
 	}
 
 	return f.newObject(o), nil
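The reshaped condition keeps the same failure behaviour but gains a success path: when both hashes are known and equal, a debug line is now emitted instead of silently falling through. A sketch of the control flow, where the hash values and the logger are illustrative stand-ins rather than the crypt backend's types:

```go
package main

import (
	"errors"
	"fmt"
)

// verify mirrors the reshaped check in put: only compare when both hashes
// are known, fail on mismatch, and report success otherwise.
func verify(srcHash, dstHash string) error {
	if srcHash != "" && dstHash != "" {
		if srcHash != dstHash {
			return errors.New("corrupted on transfer: crypted hash differ")
		}
		fmt.Printf("hash %s OK\n", srcHash) // stand-in for fs.Debugf
	}
	return nil
}

func main() {
	fmt.Println(verify("abc123", "abc123")) // logs OK, returns <nil>
	fmt.Println(verify("abc123", "def456")) // returns the corruption error
	fmt.Println(verify("", "def456"))       // nothing to compare, returns <nil>
}
```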
@@ -641,7 +641,10 @@ func (f *Fs) Features() *fs.Features {
 }
 
 // shouldRetry determines whether a given err rates being retried
-func (f *Fs) shouldRetry(err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	if err == nil {
 		return false, nil
 	}
@@ -695,20 +698,20 @@ func containsString(slice []string, s string) bool {
 }
 
 // getFile returns drive.File for the ID passed and fields passed in
-func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
+func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (info *drive.File, err error) {
 	err = f.pacer.Call(func() (bool, error) {
 		info, err = f.svc.Files.Get(ID).
 			Fields(fields).
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	return info, err
 }
 
 // getRootID returns the canonical ID for the "root" ID
-func (f *Fs) getRootID() (string, error) {
-	info, err := f.getFile("root", "id")
+func (f *Fs) getRootID(ctx context.Context) (string, error) {
+	info, err := f.getFile(ctx, "root", "id")
 	if err != nil {
 		return "", errors.Wrap(err, "couldn't find root directory ID")
 	}
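The first new lines in shouldRetry are the heart of this whole change set: if a request failed because its context was cancelled or timed out, the pacer must not retry it. fserrors.ContextError folds the context's error into err and reports whether that happened. A minimal stand-alone version of the same idea using only the standard library (errTemporary and the trailing retry rule are invented for the example):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errTemporary = errors.New("temporary failure")

// shouldRetry is a simplified version of the pattern in the diff: give up
// immediately when the context is done, otherwise decide on the error itself.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	// Equivalent in spirit to fserrors.ContextError(ctx, &err).
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	if err == nil {
		return false, nil
	}
	// The real code inspects googleapi errors, rate limits, etc.
	return errors.Is(err, errTemporary), err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	retry, err := shouldRetry(ctx, errTemporary)
	fmt.Println(retry, err) // prints: false context canceled (cancellation wins over retryable errors)
}
```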
@@ -814,7 +817,7 @@ OUTER:
 		var files *drive.FileList
 		err = f.pacer.Call(func() (bool, error) {
 			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
-			return f.shouldRetry(err)
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return false, errors.Wrap(err, "couldn't list directory")
@@ -837,7 +840,7 @@ OUTER:
 			if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
 				continue
 			}
-			item, err = f.resolveShortcut(item)
+			item, err = f.resolveShortcut(ctx, item)
 			if err != nil {
 				return false, errors.Wrap(err, "list")
 			}
@@ -855,7 +858,7 @@ OUTER:
 				if !found {
 					continue
 				}
-				_, exportName, _, _ := f.findExportFormat(item)
+				_, exportName, _, _ := f.findExportFormat(ctx, item)
 				if exportName == "" || exportName != title {
 					continue
 				}
@@ -1155,7 +1158,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
 		f.rootFolderID = f.opt.TeamDriveID
 	} else {
 		// otherwise look up the actual root ID
-		rootID, err := f.getRootID()
+		rootID, err := f.getRootID(ctx)
 		if err != nil {
 			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
 				// 404 means that this scope does not have permission to get the
@@ -1328,26 +1331,26 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
 // newObjectWithInfo creates an fs.Object for any drive.File
 //
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
-func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
 	// If item has MD5 sum or a length it is a file stored on drive
 	if info.Md5Checksum != "" || info.Size > 0 {
 		return f.newRegularObject(remote, info), nil
 	}
 
-	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
-	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
+	extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
+	return f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
 }
 
 // newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
 //
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
-	remote string, info *drive.File,
+	ctx context.Context, remote string, info *drive.File,
 	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
 	// Note that resolveShortcut will have been called already if
 	// we are being called from a listing. However the drive.Item
 	// will have been resolved so this will do nothing.
-	info, err = f.resolveShortcut(info)
+	info, err = f.resolveShortcut(ctx, info)
 	if err != nil {
 		return nil, errors.Wrap(err, "new object")
 	}
@@ -1395,7 +1398,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	}
 
 	remote = remote[:len(remote)-len(extension)]
-	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
+	obj, err := f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
 	switch {
 	case err != nil:
 		return nil, err
@@ -1412,7 +1415,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	pathID = actualID(pathID)
 	found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
 		if !f.opt.SkipGdocs {
-			_, exportName, _, isDocument := f.findExportFormat(item)
+			_, exportName, _, isDocument := f.findExportFormat(ctx, item)
 			if exportName == leaf {
 				pathIDOut = item.Id
 				return true
@@ -1447,8 +1450,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		info, err = f.svc.Files.Create(createInfo).
 			Fields("id").
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return "", err
@@ -1483,15 +1486,15 @@ func linkTemplate(mt string) *template.Template {
 	})
 	return _linkTemplates[mt]
 }
-func (f *Fs) fetchFormats() {
+func (f *Fs) fetchFormats(ctx context.Context) {
 	fetchFormatsOnce.Do(func() {
 		var about *drive.About
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
 			about, err = f.svc.About.Get().
 				Fields("exportFormats,importFormats").
-				Do()
-			return f.shouldRetry(err)
+				Context(ctx).Do()
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
@@ -1508,8 +1511,8 @@ func (f *Fs) fetchFormats() {
 // if necessary.
 //
 // if the fetch fails then it will not export any drive formats
-func (f *Fs) exportFormats() map[string][]string {
-	f.fetchFormats()
+func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
+	f.fetchFormats(ctx)
 	return _exportFormats
 }
 
@@ -1517,8 +1520,8 @@ func (f *Fs) exportFormats() map[string][]string {
 // if necessary.
 //
 // if the fetch fails then it will not import any drive formats
-func (f *Fs) importFormats() map[string][]string {
-	f.fetchFormats()
+func (f *Fs) importFormats(ctx context.Context) map[string][]string {
+	f.fetchFormats(ctx)
 	return _importFormats
 }
 
@@ -1527,9 +1530,9 @@ func (f *Fs) importFormats() map[string][]string {
 //
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", false)
-func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
+func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
 	extension, mimeType string, isDocument bool) {
-	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
+	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
 	if isDocument {
 		for _, _extension := range f.exportExtensions {
 			_mimeType := mime.TypeByExtension(_extension)
@@ -1556,8 +1559,8 @@ func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
 //
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", "", false)
-func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
-	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
+func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
+	extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
 	if extension != "" {
 		filename = item.Name + extension
 	}
@@ -1569,9 +1572,9 @@ func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType s
 // MIME type is returned
 //
 // When no match is found "" is returned.
-func (f *Fs) findImportFormat(mimeType string) string {
+func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
 	mimeType = fixMimeType(mimeType)
-	ifs := f.importFormats()
+	ifs := f.importFormats(ctx)
 	for _, mt := range f.importMimeTypes {
 		if mt == mimeType {
 			importMimeTypes := ifs[mimeType]
@@ -1604,7 +1607,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 	var iErr error
 	_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
-		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
+		entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
 		if err != nil {
 			iErr = err
 			return true
@@ -1717,7 +1720,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
 				}
 			}
 			remote := path.Join(paths[i], item.Name)
-			entry, err := f.itemToDirEntry(remote, item)
+			entry, err := f.itemToDirEntry(ctx, remote, item)
 			if err != nil {
 				iErr = err
 				return true
@@ -1982,7 +1985,7 @@ func isShortcut(item *drive.File) bool {
 // Note that we assume shortcuts can't point to shortcuts. Google
 // drive web interface doesn't offer the option to create a shortcut
 // to a shortcut. The documentation is silent on the issue.
-func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
+func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *drive.File, err error) {
 	if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
 		return item, nil
 	}
@@ -1990,7 +1993,7 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
 		fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
 		return item, nil
 	}
-	newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
+	newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
 	if err != nil {
 		if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
 			// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
@@ -2012,7 +2015,7 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
 // itemToDirEntry converts a drive.File to an fs.DirEntry.
 // When the drive.File cannot be represented as an fs.DirEntry
 // (nil, nil) is returned.
-func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File) (entry fs.DirEntry, err error) {
 	switch {
 	case item.MimeType == driveFolderType:
 		// cache the directory ID for later lookups
@@ -2026,7 +2029,7 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry,
 	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
 		// ignore object
 	default:
-		entry, err = f.newObjectWithInfo(remote, item)
+		entry, err = f.newObjectWithInfo(ctx, remote, item)
 		if err == fs.ErrorObjectNotFound {
 			return nil, nil
 		}
@@ -2093,12 +2096,12 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	importMimeType := ""
 
 	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
-		importMimeType = f.findImportFormat(srcMimeType)
+		importMimeType = f.findImportFormat(ctx, srcMimeType)
 
 		if isInternalMimeType(importMimeType) {
 			remote = remote[:len(remote)-len(srcExt)]
 
-			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
+			exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
 			if exportExt == "" {
 				return nil, errors.Errorf("No export format found for %q", importMimeType)
 			}
@@ -2128,8 +2131,8 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 			Fields(partialFields).
 			SupportsAllDrives(true).
 			KeepRevisionForever(f.opt.KeepRevisionForever).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
@@ -2141,7 +2144,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 			return nil, err
 		}
 	}
-	return f.newObjectWithInfo(remote, info)
+	return f.newObjectWithInfo(ctx, remote, info)
 }
 
 // MergeDirs merges the contents of all the directories passed
@@ -2183,8 +2186,8 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 				AddParents(dstDir.ID()).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
-			return f.shouldRetry(err)
+				Context(ctx).Do()
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
@@ -2217,14 +2220,14 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
 			_, err = f.svc.Files.Update(id, &info).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
+				Context(ctx).Do()
 		} else {
 			err = f.svc.Files.Delete(id).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
+				Context(ctx).Do()
 		}
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 }
 
@@ -2337,11 +2340,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 	if isDoc {
 		// preserve the description on copy for docs
-		info, err := f.getFile(actualID(srcObj.id), "description")
+		info, err := f.getFile(ctx, actualID(srcObj.id), "description")
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to read description for Google Doc")
-		}
+			fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
+		} else {
 			createInfo.Description = info.Description
+		}
 	} else {
 		// don't overwrite the description on copy for files
 		// this should work for docs but it doesn't - it is probably a bug in Google Drive
@@ -2357,13 +2361,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Fields(partialFields).
 			SupportsAllDrives(true).
 			KeepRevisionForever(f.opt.KeepRevisionForever).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
 	}
-	newObject, err := f.newObjectWithInfo(remote, info)
+	newObject, err := f.newObjectWithInfo(ctx, remote, info)
 	if err != nil {
 		return nil, err
 	}
@@ -2457,7 +2461,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 
 	if err != nil {
@@ -2475,7 +2479,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
 	var td *drive.Drive
 	err = f.pacer.Call(func() (bool, error) {
 		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to get Shared Drive info")
@@ -2498,7 +2502,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
@@ -2570,14 +2574,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			AddParents(dstParents).
 			Fields(partialFields).
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
 	}
 
-	return f.newObjectWithInfo(remote, info)
+	return f.newObjectWithInfo(ctx, remote, info)
 }
 
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
@@ -2607,8 +2611,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		_, err = f.svc.Permissions.Create(id, permission).
 			Fields("").
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return "", err
@@ -2650,8 +2654,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 			AddParents(dstDirectoryID).
 			Fields("").
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
@@ -2669,7 +2673,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 	go func() {
 		// get the StartPageToken early so all changes from now on get processed
-		startPageToken, err := f.changeNotifyStartPageToken()
+		startPageToken, err := f.changeNotifyStartPageToken(ctx)
 		if err != nil {
 			fs.Infof(f, "Failed to get StartPageToken: %s", err)
 		}
@@ -2694,7 +2698,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 				}
 			case <-tickerC:
 				if startPageToken == "" {
-					startPageToken, err = f.changeNotifyStartPageToken()
+					startPageToken, err = f.changeNotifyStartPageToken(ctx)
 					if err != nil {
 						fs.Infof(f, "Failed to get StartPageToken: %s", err)
 						continue
@@ -2709,15 +2713,15 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		}
 	}()
 }
-func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
+func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
 	var startPageToken *drive.StartPageToken
 	err = f.pacer.Call(func() (bool, error) {
 		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
 		if f.isTeamDrive {
 			changes.DriveId(f.opt.TeamDriveID)
 		}
-		startPageToken, err = changes.Do()
-		return f.shouldRetry(err)
+		startPageToken, err = changes.Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return
@@ -2746,7 +2750,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 			changesCall.Spaces("appDataFolder")
 		}
 		changeList, err = changesCall.Context(ctx).Do()
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return
@@ -2942,8 +2946,8 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
 		Fields(partialFields).
 		SupportsAllDrives(true).
 		KeepRevisionForever(dstFs.opt.KeepRevisionForever).
-		Do()
-		return dstFs.shouldRetry(err)
+		Context(ctx).Do()
+		return dstFs.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "shortcut creation failed")
@@ -2951,24 +2955,24 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
 	if isDir {
 		return nil, nil
 	}
-	return dstFs.newObjectWithInfo(dstPath, info)
+	return dstFs.newObjectWithInfo(ctx, dstPath, info)
 }
 
 // List all team drives
-func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
-	drives = []*drive.TeamDrive{}
-	listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
+func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
+	drives = []*drive.Drive{}
+	listTeamDrives := f.svc.Drives.List().PageSize(100)
 	var defaultFs Fs // default Fs with default Options
 	for {
-		var teamDrives *drive.TeamDriveList
+		var teamDrives *drive.DriveList
 		err = f.pacer.Call(func() (bool, error) {
 			teamDrives, err = listTeamDrives.Context(ctx).Do()
-			return defaultFs.shouldRetry(err)
+			return defaultFs.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return drives, errors.Wrap(err, "listing Team Drives failed")
 		}
-		drives = append(drives, teamDrives.TeamDrives...)
+		drives = append(drives, teamDrives.Drives...)
 		if teamDrives.NextPageToken == "" {
 			break
 		}
@@ -3005,8 +3009,8 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
|||||||
_, err := f.svc.Files.Update(item.Id, &update).
|
_, err := f.svc.Files.Update(item.Id, &update).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
Fields("trashed").
|
Fields("trashed").
|
||||||
Do()
|
Context(ctx).Do()
|
||||||
return f.shouldRetry(err)
|
return f.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = errors.Wrap(err, "failed to restore")
|
err = errors.Wrap(err, "failed to restore")
|
||||||
@@ -3048,7 +3052,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
|||||||
|
|
||||||
// copy file with id to dest
|
// copy file with id to dest
|
||||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||||
info, err := f.getFile(id, f.fileFields)
|
info, err := f.getFile(ctx, id, f.fileFields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't find id")
|
return errors.Wrap(err, "couldn't find id")
|
||||||
}
|
}
|
||||||
@@ -3056,7 +3060,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
|||||||
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||||
}
|
}
|
||||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||||
o, err := f.newObjectWithInfo(info.Name, info)
|
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -3065,7 +3069,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if destLeaf == "" {
|
if destLeaf == "" {
|
||||||
destLeaf = info.Name
|
destLeaf = path.Base(o.Remote())
|
||||||
}
|
}
|
||||||
if destDir == "" {
|
if destDir == "" {
|
||||||
destDir = "."
|
destDir = "."
|
||||||
@@ -3357,7 +3361,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
|||||||
|
|
||||||
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
|
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
|
||||||
if !f.opt.SkipGdocs {
|
if !f.opt.SkipGdocs {
|
||||||
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
|
extension, exportName, exportMimeType, isDocument = f.findExportFormat(ctx, item)
|
||||||
if exportName == leaf {
|
if exportName == leaf {
|
||||||
info = item
|
info = item
|
||||||
return true
|
return true
|
||||||
@@ -3408,8 +3412,8 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
|
|||||||
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
|
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
|
||||||
Fields(partialFields).
|
Fields(partialFields).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
Do()
|
Context(ctx).Do()
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -3447,7 +3451,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
|
|||||||
_ = res.Body.Close() // ignore error
|
_ = res.Body.Close() // ignore error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, nil, err
|
return req, nil, err
|
||||||
@@ -3539,8 +3543,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
|
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
|
||||||
Fields("downloadUrl").
|
Fields("downloadUrl").
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
Do()
|
Context(ctx).Do()
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
|
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
|
||||||
@@ -3620,8 +3624,8 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
|
|||||||
Fields(partialFields).
|
Fields(partialFields).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
|
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
|
||||||
Do()
|
Context(ctx).Do()
|
||||||
return o.fs.shouldRetry(err)
|
return o.fs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -3664,7 +3668,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
|
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -3688,7 +3692,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
|
|||||||
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
|
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
|
||||||
return errors.Errorf("can't update google document type without --drive-import-formats")
|
return errors.Errorf("can't update google document type without --drive-import-formats")
|
||||||
}
|
}
|
||||||
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
|
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
|
||||||
if importMimeType == "" {
|
if importMimeType == "" {
|
||||||
return errors.Errorf("no import format found for %q", srcMimeType)
|
return errors.Errorf("no import format found for %q", srcMimeType)
|
||||||
}
|
}
|
||||||
@@ -3705,7 +3709,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
|
|||||||
remote := src.Remote()
|
remote := src.Remote()
|
||||||
remote = remote[:len(remote)-o.extLen]
|
remote = remote[:len(remote)-o.extLen]
|
||||||
|
|
||||||
newO, err := o.fs.newObjectWithInfo(remote, info)
|
newO, err := o.fs.newObjectWithInfo(ctx, remote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
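The drive hunks above all make the same change: the caller's context is handed both to the Google API call (Context(ctx)) and to shouldRetry, so a cancelled context ends the pacer's retry loop instead of retrying. A stdlib-only sketch of that idea, with illustrative names rather than rclone's actual API:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithRetry is a stdlib-only sketch of the pattern above: the retry
// loop consults the context first, so a cancelled context aborts at once
// instead of burning through the remaining attempts.
func callWithRetry(ctx context.Context, attempts int, call func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if ctx.Err() != nil {
			return ctx.Err() // cancelled or deadline exceeded - do not retry
		}
		if err = call(ctx); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond) // crude fixed backoff, stands in for the pacer
	}
	return err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
	defer cancel()
	err := callWithRetry(ctx, 10, func(context.Context) error {
		return errors.New("transient failure")
	})
	fmt.Println(err) // deadline exceeded once the context expires
}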
@@ -111,6 +111,7 @@ func TestInternalParseExtensions(t *testing.T) {
 }

 func TestInternalFindExportFormat(t *testing.T) {
+ctx := context.Background()
 item := &drive.File{
 Name: "file",
 MimeType: "application/vnd.google-apps.document",
@@ -128,7 +129,7 @@ func TestInternalFindExportFormat(t *testing.T) {
 } {
 f := new(Fs)
 f.exportExtensions = test.extensions
-gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
+gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
 assert.Equal(t, test.wantExtension, gotExtension)
 if test.wantExtension != "" {
 assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -94,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
 defer googleapi.CloseBody(res)
 err = googleapi.CheckResponse(res)
 }
-return f.shouldRetry(err)
+return f.shouldRetry(ctx, err)
 })
 if err != nil {
 return nil, err
@@ -202,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 err = rx.f.pacer.Call(func() (bool, error) {
 fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
 StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
-again, err := rx.f.shouldRetry(err)
+again, err := rx.f.shouldRetry(ctx, err)
 if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
 again = false
 err = nil
@@ -310,7 +310,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 switch e := err.(type) {
 case auth.RateLimitAPIError:
 if e.RateLimitError.RetryAfter > 0 {
-fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
 err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 }
 return true, err
@@ -1084,13 +1084,30 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
 createArg := sharing.CreateSharedLinkWithSettingsArg{
 Path: absPath,
-// FIXME this gives settings_error/not_authorized/.. errors
-// and the expires setting isn't in the documentation so remove
-// for now.
-// Settings: &sharing.SharedLinkSettings{
-// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
-// },
+Settings: &sharing.SharedLinkSettings{
+RequestedVisibility: &sharing.RequestedVisibility{
+Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
+},
+Audience: &sharing.LinkAudience{
+Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
+},
+Access: &sharing.RequestedLinkAccessLevel{
+Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
+},
+},
 }
+if expire < fs.DurationOff {
+expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
+createArg.Settings.Expires = expiryTime
+}
+// FIXME note we can't set Settings for non enterprise dropbox
+// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
+// however this only goes wrong when we set Expires, so as a
+// work-around remove Settings unless expire is set.
+if expire == fs.DurationOff {
+createArg.Settings = nil
+}

 var linkRes sharing.IsSharedLinkMetadata
 err = f.pacer.Call(func() (bool, error) {
 linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
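The Dropbox PublicLink hunk builds the shared-link Settings up front, attaches an Expires only when the caller asked for an expiry (expire < fs.DurationOff), and drops Settings entirely otherwise to work around the SDK issue noted in the comment. A self-contained sketch of that expiry check, where durationOff is an illustrative stand-in for fs.DurationOff:

package main

import (
	"fmt"
	"time"
)

// durationOff is an illustrative stand-in for fs.DurationOff, rclone's
// "no expiry requested" sentinel (the largest possible Duration).
const durationOff = time.Duration(1<<63 - 1)

// expiryFor mirrors the added PublicLink logic: only compute an expiry
// time when the user actually asked for one.
func expiryFor(expire time.Duration) (time.Time, bool) {
	if expire < durationOff {
		return time.Now().Add(expire).UTC().Round(time.Second), true
	}
	return time.Time{}, false // no expiry requested - leave Settings untouched
}

func main() {
	if t, ok := expiryFor(24 * time.Hour); ok {
		fmt.Println("link expires at", t)
	}
	if _, ok := expiryFor(durationOff); !ok {
		fmt.Println("no expiry requested")
	}
}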
@@ -348,8 +348,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
 return nil, err
 }

-if len(fileUploadResponse.Links) != 1 {
-return nil, errors.New("unexpected amount of files")
+if len(fileUploadResponse.Links) == 0 {
+return nil, errors.New("upload response not found")
+} else if len(fileUploadResponse.Links) > 1 {
+fs.Debugf(remote, "Multiple upload responses found, using the first")
 }

 link := fileUploadResponse.Links[0]
@@ -21,6 +21,7 @@ import (
 "github.com/rclone/rclone/fs/config/configmap"
 "github.com/rclone/rclone/fs/config/configstruct"
 "github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fserrors"
 "github.com/rclone/rclone/fs/fshttp"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/lib/encoder"
@@ -33,6 +34,12 @@ var (
 currentUser = env.CurrentUser()
 )

+const (
+minSleep = 10 * time.Millisecond
+maxSleep = 2 * time.Second
+decayConstant = 2 // bigger for slower decay, exponential
+)
+
 // Register with Fs
 func init() {
 fs.Register(&fs.RegInfo{
@@ -104,6 +111,11 @@ given, rclone will empty the connection pool.
 Set to 0 to keep connections indefinitely.
 `,
 Advanced: true,
+}, {
+Name: "close_timeout",
+Help: "Maximum time to wait for a response to close.",
+Default: fs.Duration(60 * time.Second),
+Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -132,6 +144,7 @@ type Options struct {
 DisableEPSV bool `config:"disable_epsv"`
 DisableMLSD bool `config:"disable_mlsd"`
 IdleTimeout fs.Duration `config:"idle_timeout"`
+CloseTimeout fs.Duration `config:"close_timeout"`
 Enc encoder.MultiEncoder `config:"encoding"`
 }

@@ -151,6 +164,7 @@ type Fs struct {
 drain *time.Timer // used to drain the pool when we stop using the connections
 tokens *pacer.TokenDispenser
 tlsConf *tls.Config
+pacer *fs.Pacer // pacer for FTP connections
 }

 // Object describes an FTP file
@@ -244,8 +258,24 @@ func (d *dialCtx) dial(network, address string) (net.Conn, error) {
 return conn, err
 }

+// shouldRetry returns a boolean as to whether this err deserve to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
+switch errX := err.(type) {
+case *textproto.Error:
+switch errX.Code {
+case ftp.StatusNotAvailable:
+return true, err
+}
+}
+return fserrors.ShouldRetry(err), err
+}
+
 // Open a new connection to the FTP server.
-func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 fs.Debugf(f, "Connecting to FTP server")
 dCtx := dialCtx{f, ctx}
 ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
@@ -267,18 +297,22 @@ func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
 if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
 ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
 }
-c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+err = f.pacer.Call(func() (bool, error) {
+c, err = ftp.Dial(f.dialAddr, ftpConfig...)
 if err != nil {
-fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
-return nil, errors.Wrap(err, "ftpConnection Dial")
+return shouldRetry(ctx, err)
 }
 err = c.Login(f.user, f.pass)
 if err != nil {
 _ = c.Quit()
-fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
-return nil, errors.Wrap(err, "ftpConnection Login")
+return shouldRetry(ctx, err)
 }
-return c, nil
+return false, nil
+})
+if err != nil {
+err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
+}
+return c, err
 }

 // Get an FTP connection from the pool, or open a new one
@@ -411,6 +445,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 dialAddr: dialAddr,
 tokens: pacer.NewTokenDispenser(opt.Concurrency),
 tlsConf: tlsConfig,
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
 CanHaveEmptyDirectories: true,
@@ -931,8 +966,8 @@ func (f *ftpReadCloser) Close() error {
 go func() {
 errchan <- f.rc.Close()
 }()
-// Wait for Close for up to 60 seconds
-timer := time.NewTimer(60 * time.Second)
+// Wait for Close for up to 60 seconds by default
+timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
 select {
 case err = <-errchan:
 timer.Stop()
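The FTP hunks add a connection pacer, a ctx-aware shouldRetry and a close_timeout option; ftpReadCloser.Close now waits at most CloseTimeout for the server to acknowledge the close. A self-contained sketch of that timer/select pattern (names are illustrative, not the backend's):

package main

import (
	"errors"
	"fmt"
	"time"
)

// closeWithTimeout mirrors the timer/select used in ftpReadCloser.Close:
// run Close in a goroutine and wait at most closeTimeout for it, so a dead
// server cannot hang the caller forever.
func closeWithTimeout(closeFn func() error, closeTimeout time.Duration) error {
	errchan := make(chan error, 1)
	go func() {
		errchan <- closeFn()
	}()
	timer := time.NewTimer(closeTimeout)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return err
	case <-timer.C:
		return errors.New("timeout when waiting for connection Close")
	}
}

func main() {
	err := closeWithTimeout(func() error {
		time.Sleep(2 * time.Second) // simulate a server that never answers the close
		return nil
	}, 500*time.Millisecond)
	fmt.Println(err)
}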
@@ -44,6 +44,7 @@ func init() {
 Options: []fs.Option{{
 Name: "nounc",
 Help: "Disable UNC (long path names) conversion on Windows",
+Advanced: runtime.GOOS != "windows",
 Examples: []fs.OptionExample{{
 Value: "true",
 Help: "Disables long file names",
@@ -1144,6 +1145,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 err = file.PreAllocate(src.Size(), f)
 if err != nil {
 fs.Debugf(o, "Failed to pre-allocate: %v", err)
+if err == file.ErrDiskFull {
+_ = f.Close()
+return err
+}
 }
 }
 out = f
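The local backend hunk treats file.ErrDiskFull from pre-allocation as fatal: the file is closed and the error returned, while other pre-allocation failures are still only logged and the write is attempted anyway. A self-contained sketch of that distinction (errDiskFull is an illustrative stand-in for the backend's sentinel):

package main

import (
	"errors"
	"fmt"
)

// errDiskFull stands in for file.ErrDiskFull in this sketch.
var errDiskFull = errors.New("preallocate: disk full")

func preAllocate(size int64) error {
	if size > 1<<20 { // pretend anything over 1 MiB fills the disk
		return errDiskFull
	}
	return nil
}

func upload(size int64) error {
	if err := preAllocate(size); err != nil {
		fmt.Println("failed to pre-allocate:", err)
		if errors.Is(err, errDiskFull) {
			return err // fatal: do not attempt the write at all
		}
		// non-fatal: fall through and try the write anyway
	}
	fmt.Println("writing", size, "bytes")
	return nil
}

func main() {
	fmt.Println(upload(512))
	fmt.Println(upload(4 << 20))
}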
@@ -234,7 +234,10 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this response and err
 // deserve to be retried. It returns the err as a convenience.
 // Retries password authorization (once) in a special case of access denied.
-func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
+func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
 if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
 reAuthErr := f.reAuthorize(opts, err)
 return reAuthErr == nil, err // return an original error
@@ -600,7 +603,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
 var info api.ItemInfoResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })

 if err != nil {
@@ -736,7 +739,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
 )
 err = f.pacer.Call(func() (bool, error) {
 res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })

 if err != nil {
@@ -800,7 +803,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
 var res *http.Response
 err = f.pacer.Call(func() (bool, error) {
 res, err = f.srv.Call(ctx, &opts)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })
 if err != nil {
 closeBody(res)
@@ -1073,7 +1076,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
 var res *http.Response
 err = f.pacer.Call(func() (bool, error) {
 res, err = f.srv.Call(ctx, &opts)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })
 if err != nil {
 closeBody(res)
@@ -1216,7 +1219,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
 var response api.GenericResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })

 switch {
@@ -1288,7 +1291,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 var response api.GenericBodyResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })

 if err != nil {
@@ -1392,7 +1395,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
 var res *http.Response
 err = f.pacer.Call(func() (bool, error) {
 res, err = f.srv.Call(ctx, &opts)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })
 if err != nil {
 closeBody(res)
@@ -1483,7 +1486,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 var response api.GenericBodyResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })

 if err == nil && response.Body != "" {
@@ -1524,7 +1527,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 var response api.CleanupResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })
 if err != nil {
 return err
@@ -1557,7 +1560,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 var info api.UserInfoResponse
 err = f.pacer.Call(func() (bool, error) {
 res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-return shouldRetry(res, err, f, &opts)
+return shouldRetry(ctx, res, err, f, &opts)
 })
 if err != nil {
 return nil, err
@@ -2076,7 +2079,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
 var res *http.Response
 err = o.fs.pacer.Call(func() (bool, error) {
 res, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(res, err, o.fs, &opts)
+return shouldRetry(ctx, res, err, o.fs, &opts)
 })
 if err != nil {
 closeBody(res)
@@ -2172,7 +2175,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 opts.RootURL = server
 res, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(res, err, o.fs, &opts)
+return shouldRetry(ctx, res, err, o.fs, &opts)
 })
 if err != nil {
 if res != nil && res.Body != nil {
|||||||
@@ -30,6 +30,7 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
@@ -158,7 +159,10 @@ func parsePath(path string) (root string) {
|
|||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||||
// retried. It returns the err as a convenience
|
// retried. It returns the err as a convenience
|
||||||
func shouldRetry(err error) (bool, error) {
|
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
|
if fserrors.ContextError(ctx, &err) {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
// Let the mega library handle the low level retries
|
// Let the mega library handle the low level retries
|
||||||
return false, err
|
return false, err
|
||||||
/*
|
/*
|
||||||
@@ -171,8 +175,8 @@ func shouldRetry(err error) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
|
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
|
||||||
rootNode, err := f.findRoot(false)
|
rootNode, err := f.findRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -237,7 +241,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
// Find the root node and check if it is a file or not
|
// Find the root node and check if it is a file or not
|
||||||
_, err = f.findRoot(false)
|
_, err = f.findRoot(ctx, false)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
// root node found and is a directory
|
// root node found and is a directory
|
||||||
@@ -307,8 +311,8 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
|
|||||||
// lookupDir looks up the node for the directory of the name given
|
// lookupDir looks up the node for the directory of the name given
|
||||||
//
|
//
|
||||||
// if create is true it tries to create the root directory if not found
|
// if create is true it tries to create the root directory if not found
|
||||||
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
|
||||||
rootNode, err := f.findRoot(false)
|
rootNode, err := f.findRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -316,15 +320,15 @@ func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// lookupParentDir finds the parent node for the remote passed in
|
// lookupParentDir finds the parent node for the remote passed in
|
||||||
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
|
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||||
parent, leaf := path.Split(remote)
|
parent, leaf := path.Split(remote)
|
||||||
dirNode, err = f.lookupDir(parent)
|
dirNode, err = f.lookupDir(ctx, parent)
|
||||||
return dirNode, leaf, err
|
return dirNode, leaf, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// mkdir makes the directory and any parent directories for the
|
// mkdir makes the directory and any parent directories for the
|
||||||
// directory of the name given
|
// directory of the name given
|
||||||
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
||||||
f.mkdirMu.Lock()
|
f.mkdirMu.Lock()
|
||||||
defer f.mkdirMu.Unlock()
|
defer f.mkdirMu.Unlock()
|
||||||
|
|
||||||
@@ -358,7 +362,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
|||||||
// create directory called name in node
|
// create directory called name in node
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
node, err = f.srv.CreateDir(name, node)
|
node, err = f.srv.CreateDir(name, node)
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "mkdir create node failed")
|
return nil, errors.Wrap(err, "mkdir create node failed")
|
||||||
@@ -368,20 +372,20 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// mkdirParent creates the parent directory of remote
|
// mkdirParent creates the parent directory of remote
|
||||||
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
|
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||||
rootNode, err := f.findRoot(true)
|
rootNode, err := f.findRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
parent, leaf := path.Split(remote)
|
parent, leaf := path.Split(remote)
|
||||||
dirNode, err = f.mkdir(rootNode, parent)
|
dirNode, err = f.mkdir(ctx, rootNode, parent)
|
||||||
return dirNode, leaf, err
|
return dirNode, leaf, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// findRoot looks up the root directory node and returns it.
|
// findRoot looks up the root directory node and returns it.
|
||||||
//
|
//
|
||||||
// if create is true it tries to create the root directory if not found
|
// if create is true it tries to create the root directory if not found
|
||||||
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
|
||||||
f.rootNodeMu.Lock()
|
f.rootNodeMu.Lock()
|
||||||
defer f.rootNodeMu.Unlock()
|
defer f.rootNodeMu.Unlock()
|
||||||
|
|
||||||
@@ -403,7 +407,7 @@ func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//..not found so create the root directory
|
//..not found so create the root directory
|
||||||
f._rootNode, err = f.mkdir(absRoot, f.root)
|
f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
|
||||||
return f._rootNode, err
|
return f._rootNode, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -433,7 +437,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
|
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
|
||||||
deleteErr := f.pacer.Call(func() (bool, error) {
|
deleteErr := f.pacer.Call(func() (bool, error) {
|
||||||
err := f.srv.Delete(item, true)
|
err := f.srv.Delete(item, true)
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if deleteErr != nil {
|
if deleteErr != nil {
|
||||||
err = deleteErr
|
err = deleteErr
|
||||||
@@ -447,7 +451,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
// Return an Object from a path
|
// Return an Object from a path
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
@@ -457,7 +461,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
|||||||
// Set info
|
// Set info
|
||||||
err = o.setMetaData(info)
|
err = o.setMetaData(info)
|
||||||
} else {
|
} else {
|
||||||
err = o.readMetaData() // reads info and meta, returning an error
|
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -468,7 +472,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
|||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(remote, nil)
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// list the objects into the function supplied
|
// list the objects into the function supplied
|
||||||
@@ -506,7 +510,7 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
dirNode, err := f.lookupDir(dir)
|
dirNode, err := f.lookupDir(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -518,7 +522,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
|
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
case mega.FILE:
|
case mega.FILE:
|
||||||
o, err := f.newObjectWithInfo(remote, info)
|
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
iErr = err
|
iErr = err
|
||||||
return true
|
return true
|
||||||
@@ -542,8 +546,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
// Returns the dirNode, object, leaf and error
|
// Returns the dirNode, object, leaf and error
|
||||||
//
|
//
|
||||||
// Used to create new objects
|
// Used to create new objects
|
||||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||||
dirNode, leaf, err = f.mkdirParent(remote)
|
dirNode, leaf, err = f.mkdirParent(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, leaf, err
|
return nil, nil, leaf, err
|
||||||
}
|
}
|
||||||
@@ -565,7 +569,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
|||||||
// This will create a duplicate if we upload a new file without
|
// This will create a duplicate if we upload a new file without
|
||||||
// checking to see if there is one already - use Put() for that.
|
// checking to see if there is one already - use Put() for that.
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||||
@@ -591,7 +595,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
size := src.Size()
|
size := src.Size()
|
||||||
modTime := src.ModTime(ctx)
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
o, _, _, err := f.createObject(remote, modTime, size)
|
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -600,30 +604,30 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
|
|
||||||
// Mkdir creates the directory if it doesn't exist
|
// Mkdir creates the directory if it doesn't exist
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
rootNode, err := f.findRoot(true)
|
rootNode, err := f.findRoot(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = f.mkdir(rootNode, dir)
|
_, err = f.mkdir(ctx, rootNode, dir)
|
||||||
return errors.Wrap(err, "Mkdir failed")
|
return errors.Wrap(err, "Mkdir failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
// deleteNode removes a file or directory, observing useTrash
|
// deleteNode removes a file or directory, observing useTrash
|
||||||
func (f *Fs) deleteNode(node *mega.Node) (err error) {
|
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
err = f.srv.Delete(node, f.opt.HardDelete)
|
err = f.srv.Delete(node, f.opt.HardDelete)
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// purgeCheck removes the directory dir, if check is set then it
|
// purgeCheck removes the directory dir, if check is set then it
|
||||||
// refuses to do so if it has anything in
|
// refuses to do so if it has anything in
|
||||||
func (f *Fs) purgeCheck(dir string, check bool) error {
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||||
f.mkdirMu.Lock()
|
f.mkdirMu.Lock()
|
||||||
defer f.mkdirMu.Unlock()
|
defer f.mkdirMu.Unlock()
|
||||||
|
|
||||||
rootNode, err := f.findRoot(false)
|
rootNode, err := f.findRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -644,7 +648,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
|||||||
|
|
||||||
waitEvent := f.srv.WaitEventsStart()
|
waitEvent := f.srv.WaitEventsStart()
|
||||||
|
|
||||||
err = f.deleteNode(dirNode)
|
err = f.deleteNode(ctx, dirNode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "delete directory node failed")
|
return errors.Wrap(err, "delete directory node failed")
|
||||||
}
|
}
|
||||||
@@ -662,7 +666,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
|||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(dir, true)
|
return f.purgeCheck(ctx, dir, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Precision return the precision of this Fs
|
// Precision return the precision of this Fs
|
||||||
@@ -676,13 +680,13 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
// deleting all the files quicker than just running Remove() on the
|
// deleting all the files quicker than just running Remove() on the
|
||||||
// result of List()
|
// result of List()
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purgeCheck(dir, false)
|
return f.purgeCheck(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
|
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
|
||||||
//
|
//
|
||||||
// info will be updates
|
// info will be updates
|
||||||
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
|
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
|
||||||
var (
|
var (
|
||||||
dstFs = f
|
dstFs = f
|
||||||
srcDirNode, dstDirNode *mega.Node
|
srcDirNode, dstDirNode *mega.Node
|
||||||
@@ -692,12 +696,12 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
|||||||
|
|
||||||
if dstRemote != "" {
|
if dstRemote != "" {
|
||||||
// lookup or create the destination parent directory
|
// lookup or create the destination parent directory
|
||||||
dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
|
dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
|
||||||
} else {
|
} else {
|
||||||
// find or create the parent of the root directory
|
// find or create the parent of the root directory
|
||||||
absRoot := dstFs.srv.FS.GetRoot()
|
absRoot := dstFs.srv.FS.GetRoot()
|
||||||
dstParent, dstLeaf = path.Split(dstFs.root)
|
dstParent, dstLeaf = path.Split(dstFs.root)
|
||||||
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
|
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "server-side move failed to make dst parent dir")
|
return errors.Wrap(err, "server-side move failed to make dst parent dir")
|
||||||
@@ -705,7 +709,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
|||||||
|
|
||||||
if srcRemote != "" {
|
if srcRemote != "" {
|
||||||
// lookup the existing parent directory
|
// lookup the existing parent directory
|
||||||
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
|
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
|
||||||
} else {
|
} else {
|
||||||
// lookup the existing root parent
|
// lookup the existing root parent
|
||||||
absRoot := srcFs.srv.FS.GetRoot()
|
absRoot := srcFs.srv.FS.GetRoot()
|
||||||
@@ -721,7 +725,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
|||||||
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
|
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
err = f.srv.Move(info, dstDirNode)
|
err = f.srv.Move(info, dstDirNode)
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "server-side move failed")
|
return errors.Wrap(err, "server-side move failed")
|
||||||
@@ -735,7 +739,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
|||||||
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
|
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
|
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "server-side rename failed")
|
return errors.Wrap(err, "server-side rename failed")
|
||||||
@@ -767,7 +771,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Do the move
|
// Do the move
|
||||||
err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
|
err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -798,13 +802,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// find the source
|
// find the source
|
||||||
info, err := srcFs.lookupDir(srcRemote)
|
info, err := srcFs.lookupDir(ctx, srcRemote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check the destination doesn't exist
|
// check the destination doesn't exist
|
||||||
_, err = dstFs.lookupDir(dstRemote)
|
_, err = dstFs.lookupDir(ctx, dstRemote)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return fs.ErrorDirExists
|
return fs.ErrorDirExists
|
||||||
} else if err != fs.ErrorDirNotFound {
|
} else if err != fs.ErrorDirNotFound {
|
||||||
@@ -812,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Do the move
|
// Do the move
|
||||||
err = f.move(dstRemote, srcFs, srcRemote, info)
|
err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -838,7 +842,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
|
|
||||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
||||||
root, err := f.findRoot(false)
|
root, err := f.findRoot(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "PublicLink failed to find root node")
|
return "", errors.Wrap(err, "PublicLink failed to find root node")
|
||||||
}
|
}
|
||||||
@@ -886,7 +890,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|||||||
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
|
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
err = f.srv.Move(info, dstDirNode)
|
err = f.srv.Move(info, dstDirNode)
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
|
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
|
||||||
@@ -894,7 +898,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|||||||
}
|
}
|
||||||
// rmdir (into trash) the now empty source directory
|
// rmdir (into trash) the now empty source directory
|
||||||
fs.Infof(srcDir, "removing empty directory")
|
fs.Infof(srcDir, "removing empty directory")
|
||||||
err = f.deleteNode(srcDirNode)
|
err = f.deleteNode(ctx, srcDirNode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||||
}
|
}
|
||||||
@@ -908,7 +912,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|||||||
var err error
|
var err error
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
q, err = f.srv.GetQuota()
|
q, err = f.srv.GetQuota()
|
||||||
return shouldRetry(err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to get Mega Quota")
|
return nil, errors.Wrap(err, "failed to get Mega Quota")
|
||||||
@@ -963,11 +967,11 @@ func (o *Object) setMetaData(info *mega.Node) (err error) {
|
|||||||
// readMetaData gets the metadata if it hasn't already been fetched
|
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.info != nil {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(o.remote)
+	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			err = fs.ErrorObjectNotFound
@@ -998,6 +1002,7 @@ func (o *Object) Storable() bool {
 
 // openObject represents a download in progress
 type openObject struct {
+	ctx  context.Context
 	mu   sync.Mutex
 	o    *Object
 	d    *mega.Download
@@ -1008,14 +1013,14 @@ type openObject struct {
 }
 
 // get the next chunk
-func (oo *openObject) getChunk() (err error) {
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
 	if oo.id >= oo.d.Chunks() {
 		return io.EOF
 	}
 	var chunk []byte
 	err = oo.o.fs.pacer.Call(func() (bool, error) {
 		chunk, err = oo.d.DownloadChunk(oo.id)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
@@ -1045,7 +1050,7 @@ func (oo *openObject) Read(p []byte) (n int, err error) {
 		oo.skip -= int64(size)
 	}
 	if len(oo.chunk) == 0 {
-		err = oo.getChunk()
+		err = oo.getChunk(oo.ctx)
 		if err != nil {
 			return 0, err
 		}
@@ -1068,7 +1073,7 @@ func (oo *openObject) Close() (err error) {
 	}
 	err = oo.o.fs.pacer.Call(func() (bool, error) {
 		err = oo.d.Finish()
-		return shouldRetry(err)
+		return shouldRetry(oo.ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to finish download")
@@ -1096,13 +1101,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var d *mega.Download
 	err = o.fs.pacer.Call(func() (bool, error) {
 		d, err = o.fs.srv.NewDownload(o.info)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "open download file failed")
 	}
 
 	oo := &openObject{
+		ctx:  ctx,
 		o:    o,
 		d:    d,
 		skip: offset,
@@ -1125,7 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	remote := o.Remote()
 
 	// Create the parent directory
-	dirNode, leaf, err := o.fs.mkdirParent(remote)
+	dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
 	if err != nil {
 		return errors.Wrap(err, "update make parent dir failed")
 	}
@@ -1133,7 +1139,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var u *mega.Upload
 	err = o.fs.pacer.Call(func() (bool, error) {
 		u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "upload file failed to create session")
@@ -1154,7 +1160,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 		err = o.fs.pacer.Call(func() (bool, error) {
 			err = u.UploadChunk(id, chunk)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrap(err, "upload file failed to upload chunk")
@@ -1165,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var info *mega.Node
 	err = o.fs.pacer.Call(func() (bool, error) {
 		info, err = u.Finish()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to finish upload")
@@ -1173,7 +1179,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// If the upload succeeded and the original object existed, then delete it
 	if o.info != nil {
-		err = o.fs.deleteNode(o.info)
+		err = o.fs.deleteNode(ctx, o.info)
 		if err != nil {
 			return errors.Wrap(err, "upload failed to remove old version")
 		}
@@ -1185,7 +1191,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	err := o.fs.deleteNode(o.info)
+	err := o.fs.deleteNode(ctx, o.info)
 	if err != nil {
 		return errors.Wrap(err, "Remove object failed")
 	}
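The mega hunks above all make the same change: the caller's context is threaded through to the retry predicate, so a cancelled or timed-out operation stops retrying instead of looping in the pacer. A minimal standalone sketch of that idea (this is not rclone's actual helper, which also classifies mega error codes):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// shouldRetry is a minimal, standalone version of the pattern in the
// hunks above: give up immediately if the context was cancelled or its
// deadline passed, otherwise treat the error as retryable.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr // cancelled or deadline exceeded, do not retry
	}
	return err != nil, err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // let the context expire
	retry, err := shouldRetry(ctx, errors.New("transient download error"))
	fmt.Println(retry, err) // retry is false once the context has expired
}
```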
@@ -361,6 +361,11 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
 the files to copy are already shared between them. In other cases, rclone will
 fall back to normal copy (which will be slightly slower).`,
 			Advanced: true,
+		}, {
+			Name:     "list_chunk",
+			Help:     "Size of listing chunk.",
+			Default:  1000,
+			Advanced: true,
 		}, {
 			Name:    "no_versions",
 			Default: false,
@@ -468,6 +473,7 @@ type Options struct {
 	DriveType               string `config:"drive_type"`
 	ExposeOneNoteFiles      bool   `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
+	ListChunk               int64  `config:"list_chunk"`
 	NoVersions              bool   `config:"no_versions"`
 	LinkScope               string `config:"link_scope"`
 	LinkType                string `config:"link_type"`
@@ -549,7 +555,10 @@ var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	retry := false
 	if resp != nil {
 		switch resp.StatusCode {
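The new list_chunk option above replaces the hard-coded `$top=1000` paging parameter used further down in listAll. A small sketch of how the listing path is derived from the option (the values are illustrative):

```go
package main

import "fmt"

// Options mirrors the relevant part of the backend options in the hunk
// above: list_chunk controls the $top paging parameter.
type Options struct {
	ListChunk int64
}

// childrenPath builds the request path the same way the listAll change
// below does with fmt.Sprintf("/children?$top=%d", f.opt.ListChunk).
func childrenPath(opt Options) string {
	return fmt.Sprintf("/children?$top=%d", opt.ListChunk)
}

func main() {
	fmt.Println(childrenPath(Options{ListChunk: 1000})) // /children?$top=1000
	fmt.Println(childrenPath(Options{ListChunk: 250}))  // /children?$top=250
}
```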
@@ -596,7 +605,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID s
 
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 
 	return info, resp, err
@@ -612,7 +621,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 	opts.Path = strings.TrimSuffix(opts.Path, ":")
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return info, resp, err
 }
@@ -868,7 +877,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		//fmt.Printf("...Error %v\n", err)
@@ -893,14 +902,14 @@ type listAllFn func(*api.Item) bool
 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	// Top parameter asks for bigger pages of data
 	// https://dev.onedrive.com/odata/optional-query-parameters.htm
-	opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
+	opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
 OUTER:
 	for {
 		var result api.ListChildrenResponse
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
 			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
 			return found, errors.Wrap(err, "couldn't list files")
@@ -1037,7 +1046,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
 
 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }
 
@@ -1193,7 +1202,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1286,7 +1295,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1353,7 +1362,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -1379,7 +1388,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "about failed")
@@ -1420,7 +1429,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		Password: f.opt.LinkPassword,
 	}
 
-	if expire < fs.Duration(time.Hour*24*365*100) {
+	if expire < fs.DurationOff {
 		expiry := time.Now().Add(time.Duration(expire))
 		share.Expiry = &expiry
 	}
@@ -1429,7 +1438,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var result api.CreateShareLinkResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		fmt.Println(err)
@@ -1474,7 +1483,7 @@ func (o *Object) deleteVersions(ctx context.Context) error {
 	var versions api.VersionsResponse
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -1501,7 +1510,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
 	opts.NoResponse = true
 	return o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }
 
@@ -1652,7 +1661,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
 	var info *api.Item
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	// Remove versions if required
 	if o.fs.opt.NoVersions {
@@ -1694,7 +1703,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1722,7 +1731,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
 				err = errors.New(err.Error() + " (is it a OneNote file?)")
 			}
 		}
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return response, err
 }
@@ -1737,7 +1746,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return 0, err
@@ -1797,11 +1806,11 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
 			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
 		}
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		body, err = rest.ReadBody(resp)
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		if resp.StatusCode == 200 || resp.StatusCode == 201 {
 			// we are done :)
@@ -1824,7 +1833,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return
 }
@@ -1895,7 +1904,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
 				err = errors.New(err.Error() + " (is it a OneNote file?)")
 			}
 		}
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
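All of the call sites above now hand the request context to shouldRetry; the actual work happens in the fserrors.ContextError guard added at the top of that function. A sketch of the guard in isolation, assuming rclone's fs/fserrors package as it is used in these hunks:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
)

// retryDecision shows the guard the onedrive/s3/swift shouldRetry
// functions gained in this change: a cancelled context always wins and
// turns the error into a non-retryable one, stopping the pacer loop.
func retryDecision(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err // context cancelled or expired, stop retrying
	}
	// ... the normal HTTP status / error-code based retry logic follows here ...
	return err != nil, err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	retry, err := retryDecision(ctx, nil)
	fmt.Println(retry, err) // retry is false, err reports the cancellation
}
```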
@@ -1137,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// opts.Body=0), so upload it as a multipart form POST with
 	// Content-Length set.
 	if size == 0 {
-		formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
+		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
 		if err != nil {
 			return errors.Wrap(err, "failed to make multipart upload for 0 length file")
 		}
@@ -1399,7 +1399,10 @@ var retryErrorCodes = []int{
 //S3 is pretty resilient, and the built in retry handling is probably sufficient
 // as it should notice closed connections and timeouts which are the most likely
 // sort of failure modes
-func (f *Fs) shouldRetry(err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// If this is an awserr object, try and extract more useful information to determine if we should retry
 	if awsError, ok := err.(awserr.Error); ok {
 		// Simple case, check the original embedded error in case it's generically retryable
@@ -1411,7 +1414,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 		// 301 if wrong region for bucket - can only update if running from a bucket
 		if f.rootBucket != "" {
 			if reqErr.StatusCode() == http.StatusMovedPermanently {
-				urfbErr := f.updateRegionForBucket(f.rootBucket)
+				urfbErr := f.updateRegionForBucket(ctx, f.rootBucket)
 				if urfbErr != nil {
 					fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
 					return false, err
@@ -1559,6 +1562,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
 		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
 		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
+		// Set the name of the profile if supplied
+		awsSessionOpts.Profile = opt.Profile
 	}
 	ses, err := session.NewSessionWithOptions(awsSessionOpts)
 	if err != nil {
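The s3Connection change above passes the configured profile name into the AWS shared-config session. Roughly, with the v1 AWS SDK, that amounts to the following (the profile name "dev" is only an illustration; rclone takes it from opt.Profile):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Load ~/.aws/config and ~/.aws/credentials and pick a named profile,
	// which is what the env-auth path above now does when a profile is set.
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Profile:           "dev",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("region from profile:", aws.StringValue(sess.Config.Region))
}
```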
@@ -1741,7 +1746,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 }
 
 // Gets the bucket location
-func (f *Fs) getBucketLocation(bucket string) (string, error) {
+func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
 	req := s3.GetBucketLocationInput{
 		Bucket: &bucket,
 	}
@@ -1749,7 +1754,7 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.c.GetBucketLocation(&req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return "", err
@@ -1759,8 +1764,8 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {
 
 // Updates the region for the bucket by reading the region from the
 // bucket then updating the session.
-func (f *Fs) updateRegionForBucket(bucket string) error {
-	region, err := f.getBucketLocation(bucket)
+func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
+	region, err := f.getBucketLocation(ctx, bucket)
 	if err != nil {
 		return errors.Wrap(err, "reading bucket location failed")
 	}
@@ -1854,7 +1859,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 				}
 			}
 		}
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2001,7 +2006,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 	var resp *s3.ListBucketsOutput
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.c.ListBucketsWithContext(ctx, &req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
@@ -2116,7 +2121,7 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		_, err := f.c.HeadBucketWithContext(ctx, &req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err == nil {
 		return true, nil
@@ -2152,7 +2157,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		_, err := f.c.CreateBucketWithContext(ctx, &req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err == nil {
 		fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
@@ -2182,7 +2187,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		_, err := f.c.DeleteBucketWithContext(ctx, &req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err == nil {
 		fs.Infof(f, "Bucket %q deleted", bucket)
@@ -2242,7 +2247,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	}
 	return f.pacer.Call(func() (bool, error) {
 		_, err := f.c.CopyObjectWithContext(ctx, req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 }
 
@@ -2286,7 +2291,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 	if err := f.pacer.Call(func() (bool, error) {
 		var err error
 		cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	}); err != nil {
 		return err
 	}
@@ -2302,7 +2307,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 				UploadId:     uid,
 				RequestPayer: req.RequestPayer,
 			})
-			return f.shouldRetry(err)
+			return f.shouldRetry(ctx, err)
 		})
 	})()
 
@@ -2325,7 +2330,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 			uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
 			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
 			if err != nil {
-				return f.shouldRetry(err)
+				return f.shouldRetry(ctx, err)
 			}
 			parts = append(parts, &s3.CompletedPart{
 				PartNumber: &partNum,
@@ -2347,7 +2352,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 			RequestPayer: req.RequestPayer,
 			UploadId:     uid,
 		})
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 }
 
@@ -2578,7 +2583,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		reqCopy.Key = &bucketPath
 		err = f.pacer.Call(func() (bool, error) {
 			_, err = f.c.RestoreObject(&reqCopy)
-			return f.shouldRetry(err)
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			st.Status = err.Error()
@@ -2626,7 +2631,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
 	var resp *s3.ListMultipartUploadsOutput
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.c.ListMultipartUploads(&req)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
@@ -2801,7 +2806,7 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var err error
 		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
-		return o.fs.shouldRetry(err)
+		return o.fs.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2957,7 +2962,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		var err error
 		httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
 		err = httpReq.Send()
-		return o.fs.shouldRetry(err)
+		return o.fs.shouldRetry(ctx, err)
 	})
 	if err, ok := err.(awserr.RequestFailure); ok {
 		if err.Code() == "InvalidObjectState" {
@@ -3016,7 +3021,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 	err = f.pacer.Call(func() (bool, error) {
 		var err error
 		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "multipart upload failed to initialise")
@@ -3035,7 +3040,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 				UploadId:     uid,
 				RequestPayer: req.RequestPayer,
 			})
-			return f.shouldRetry(err)
+			return f.shouldRetry(ctx, err)
 		})
 		if errCancel != nil {
 			fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
@@ -3111,7 +3116,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 			uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
 			if err != nil {
 				if partNum <= int64(concurrency) {
-					return f.shouldRetry(err)
+					return f.shouldRetry(ctx, err)
 				}
 				// retry all chunks once have done the first batch
 				return true, err
@@ -3151,7 +3156,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 			RequestPayer: req.RequestPayer,
 			UploadId:     uid,
 		})
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "multipart upload failed to finalise")
@@ -3306,11 +3311,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		var err error
 		resp, err = o.fs.srv.Do(httpReq)
 		if err != nil {
-			return o.fs.shouldRetry(err)
+			return o.fs.shouldRetry(ctx, err)
 		}
 		body, err := rest.ReadBody(resp)
 		if err != nil {
-			return o.fs.shouldRetry(err)
+			return o.fs.shouldRetry(ctx, err)
 		}
 		if resp.StatusCode >= 200 && resp.StatusCode < 299 {
 			return false, nil
@@ -3361,7 +3366,7 @@ func (o *Object) Remove(ctx context.Context) error {
 	}
 	err := o.fs.pacer.Call(func() (bool, error) {
 		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
-		return o.fs.shouldRetry(err)
+		return o.fs.shouldRetry(ctx, err)
 	})
 	return err
 }
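getBucketLocation and updateRegionForBucket, which the 301 handling above now calls with a context, boil down to a GetBucketLocation request plus a default. A stripped-down sketch, assuming the usual convention that an empty LocationConstraint means us-east-1:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketRegion asks S3 where a bucket lives, falling back to us-east-1,
// which the API reports as an empty string.
func bucketRegion(ctx context.Context, svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLocationWithContext(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	region := aws.StringValue(out.LocationConstraint)
	if region == "" {
		region = "us-east-1"
	}
	return region, nil
}

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	region, err := bucketRegion(context.Background(), s3.New(sess), "example-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region)
}
```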
@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 		"need_idx_progress": {"true"},
 		"replace":           {"1"},
 	}
-	formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename))
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make multipart upload")
 	}
@@ -16,6 +16,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/pkg/errors"
@@ -286,6 +287,7 @@ type Fs struct {
 	drain     *time.Timer // used to drain the pool when we stop using the connections
 	pacer     *fs.Pacer   // pacer for operations
 	savedpswd string
+	transfers int32 // count in use references
 }
 
 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -348,6 +350,23 @@ func (c *conn) closed() error {
 	return nil
 }
 
+// Show that we are doing an upload or download
+//
+// Call removeTransfer() when done
+func (f *Fs) addTransfer() {
+	atomic.AddInt32(&f.transfers, 1)
+}
+
+// Show the upload or download done
+func (f *Fs) removeTransfer() {
+	atomic.AddInt32(&f.transfers, -1)
+}
+
+// getTransfers shows whether there are any transfers in progress
+func (f *Fs) getTransfers() int32 {
+	return atomic.LoadInt32(&f.transfers)
+}
+
 // Open a new connection to the SFTP server.
 func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	// Rate limit rate of new connections
@@ -395,8 +414,12 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 	opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
 	opts = append(opts,
 		sftp.UseFstat(f.opt.UseFstat),
-		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
+		// FIXME disabled after library reversion
+		// sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
 	)
+	if f.opt.DisableConcurrentReads { // FIXME
+		fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
+	}
 
 	return sftp.NewClientPipe(pr, pw, opts...)
 }
@@ -474,6 +497,13 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 func (f *Fs) drainPool(ctx context.Context) (err error) {
 	f.poolMu.Lock()
 	defer f.poolMu.Unlock()
+	if transfers := f.getTransfers(); transfers != 0 {
+		fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
+		if f.opt.IdleTimeout > 0 {
+			f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+		}
+		return nil
+	}
 	if f.opt.IdleTimeout > 0 {
 		f.drain.Stop()
 	}
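The addTransfer/removeTransfer/getTransfers helpers above are what stop drainPool from tearing down the connection pool while uploads or downloads are still running. The pattern is a plain atomic in-use counter; a standalone sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// transferTracker is the same idea as the sftp Fs.transfers field added
// above: a lock-free in-use count that a cleanup routine can consult.
type transferTracker struct {
	transfers int32
}

func (t *transferTracker) add()              { atomic.AddInt32(&t.transfers, 1) }
func (t *transferTracker) remove()           { atomic.AddInt32(&t.transfers, -1) }
func (t *transferTracker) inProgress() int32 { return atomic.LoadInt32(&t.transfers) }

// drain mimics the guard added to drainPool: skip closing idle
// connections while anything is still transferring.
func (t *transferTracker) drain() {
	if n := t.inProgress(); n != 0 {
		fmt.Printf("not draining: %d transfers in progress\n", n)
		return
	}
	fmt.Println("draining idle connections")
}

func main() {
	var t transferTracker
	t.add()
	t.drain() // not draining: 1 transfers in progress
	t.remove()
	t.drain() // draining idle connections
}
```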
@@ -1354,7 +1384,9 @@ func (o *Object) stat(ctx context.Context) error {
 //
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if o.fs.opt.SetModTime {
+	if !o.fs.opt.SetModTime {
+		return nil
+	}
 	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "SetModTime")
@@ -1364,8 +1396,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	if err != nil {
 		return errors.Wrap(err, "SetModTime failed")
 	}
-	}
-	err := o.stat(ctx)
+	err = o.stat(ctx)
 	if err != nil {
 		return errors.Wrap(err, "SetModTime stat failed")
 	}
@@ -1379,18 +1410,22 @@ func (o *Object) Storable() bool {
 
 // objectReader represents a file open for reading on the SFTP server
 type objectReader struct {
+	f          *Fs
 	sftpFile   *sftp.File
 	pipeReader *io.PipeReader
 	done       chan struct{}
 }
 
-func newObjectReader(sftpFile *sftp.File) *objectReader {
+func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
 	pipeReader, pipeWriter := io.Pipe()
 	file := &objectReader{
+		f:          f,
 		sftpFile:   sftpFile,
 		pipeReader: pipeReader,
 		done:       make(chan struct{}),
 	}
+	// Show connection in use
+	f.addTransfer()
 
 	go func() {
 		// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1420,6 +1455,8 @@ func (file *objectReader) Close() (err error) {
 	_ = file.pipeReader.Close()
 	// Wait for the background process to finish
 	<-file.done
+	// Show connection no longer in use
+	file.f.removeTransfer()
 	return err
 }
 
@@ -1453,12 +1490,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			return nil, errors.Wrap(err, "Open Seek failed")
 		}
 	}
-	in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
+	in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
 	return in, nil
 }
 
 // Update a remote sftp file using the data <in> and ModTime from <src>
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	o.fs.addTransfer() // Show transfer in progress
+	defer o.fs.removeTransfer()
 	// Clear the hash cache since we are about to update the object
 	o.md5sum = nil
 	o.sha1sum = nil
@@ -1496,10 +1535,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		remove()
 		return errors.Wrap(err, "Update Close failed")
 	}
 
+	// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
 	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return errors.Wrap(err, "Update SetModTime failed")
 	}
+
+	// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
+	if !o.fs.opt.SetModTime {
+		err = o.stat(ctx)
+		if err == fs.ErrorObjectNotFound {
+			// In the specific case of o.fs.opt.SetModTime == false
+			// if the object wasn't found then don't return an error
+			fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
+			o.modTime = src.ModTime(ctx)
+			o.size = src.Size()
+			o.mode = os.FileMode(0666) // regular file
+		} else if err != nil {
+			return errors.Wrap(err, "Update stat failed")
+		}
+	}
+
 	return nil
 }
 
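The SetModTime and Update changes above work together: SetModTime now returns early when set_modtime is off, and Update stats the file afterwards, falling back to a best guess when the server cannot see the freshly written file yet. A compact sketch of that control flow with the SFTP specifics stubbed out (errObjectNotFound here stands in for fs.ErrorObjectNotFound):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

var errObjectNotFound = errors.New("object not found") // stand-in for fs.ErrorObjectNotFound

type object struct {
	setModTime bool // mirrors o.fs.opt.SetModTime
	modTime    time.Time
	size       int64
	mode       os.FileMode
}

// finishUpload sketches the tail of Update() after this change: when
// set_modtime is off, stat the file back, but fall back to a best guess
// if the server cannot see it yet instead of failing the upload.
func (o *object) finishUpload(stat func() error, srcModTime time.Time, srcSize int64) error {
	if !o.setModTime {
		if err := stat(); err == errObjectNotFound {
			o.modTime = srcModTime
			o.size = srcSize
			o.mode = os.FileMode(0666) // regular file
		} else if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	o := &object{setModTime: false}
	_ = o.finishUpload(func() error { return errObjectNotFound }, time.Now(), 42)
	fmt.Println(o.size, o.mode) // best-guess size and mode are used
}
```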
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/ncw/swift/v2"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
@@ -291,7 +292,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this err deserves to be
 // retried. It returns the err as a convenience
-func shouldRetry(err error) (bool, error) {
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// If this is a swift.Error object extract the HTTP error code
 	if swiftError, ok := err.(*swift.Error); ok {
 		for _, e := range retryErrorCodes {
@@ -307,7 +311,7 @@ func shouldRetry(err error) (bool, error) {
 // shouldRetryHeaders returns a boolean as to whether this err
 // deserves to be retried. It reads the headers passed in looking for
 // `Retry-After`. It returns the err as a convenience
-func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
+func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
 	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
 		if value := headers["Retry-After"]; value != "" {
 			retryAfter, parseErr := strconv.Atoi(value)
@@ -326,7 +330,7 @@ func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
 			}
 		}
 	}
-	return shouldRetry(err)
+	return shouldRetry(ctx, err)
 }
 
 // parsePath parses a remote 'url'
@@ -468,7 +472,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
 			info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
 		if err == nil && info.ContentType != directoryMarkerContentType {
 			newRoot := path.Dir(f.root)
@@ -576,7 +580,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
 			objects, err = f.c.Objects(ctx, container, opts)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			for i := range objects {
@@ -661,7 +665,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 	var containers []swift.Container
 	err = f.pacer.Call(func() (bool, error) {
 		containers, err = f.c.ContainersAll(ctx, nil)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
@@ -753,7 +757,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		containers, err = f.c.ContainersAll(ctx, nil)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
@@ -805,7 +809,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
 			_, rxHeaders, err = f.c.Container(ctx, container)
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
 	}
 	if err == swift.ContainerNotFound {
@@ -815,7 +819,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		}
 		err = f.pacer.Call(func() (bool, error) {
 			err = f.c.ContainerCreate(ctx, container, headers)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			fs.Infof(f, "Container %q created", container)
@@ -836,7 +840,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	err := f.cache.Remove(container, func() error {
 		err := f.pacer.Call(func() (bool, error) {
 			err := f.c.ContainerDelete(ctx, container)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			fs.Infof(f, "Container %q removed", container)
@@ -902,18 +906,125 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
+	isLargeObject, err := srcObj.isLargeObject(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if isLargeObject {
+		/*handle large object*/
+		err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
+	} else {
 		srcContainer, srcPath := srcObj.split()
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
 			rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
+	}
 	if err != nil {
 		return nil, err
 	}
 	return f.NewObject(ctx, remote)
 }
 
+func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
+	segmentsContainer := dstContainer + "_segments"
+	err := f.makeContainer(ctx, segmentsContainer)
+	if err != nil {
+		return err
+	}
+	segments, err := src.getSegmentsLargeObject(ctx)
+	if err != nil {
+		return err
+	}
+	if len(segments) == 0 {
+		return errors.New("could not copy object, list segments are empty")
+	}
+	nanoSeconds := time.Now().Nanosecond()
+	prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
+	copiedSegmentsLen := 10
+	for _, value := range segments {
+		if len(value) <= 0 {
+			continue
+		}
+		fragment := value[0]
+		if len(fragment) <= 0 {
+			continue
+		}
+		copiedSegmentsLen = len(value)
+		firstIndex := strings.Index(fragment, "/")
+		if firstIndex < 0 {
+			firstIndex = 0
+		} else {
+			firstIndex = firstIndex + 1
+		}
+		lastIndex := strings.LastIndex(fragment, "/")
+		if lastIndex < 0 {
+			lastIndex = len(fragment)
+		} else {
+			lastIndex = lastIndex - 1
+		}
+		prefixSegment = fragment[firstIndex:lastIndex]
+		break
+	}
+	copiedSegments := make([]string, copiedSegmentsLen)
+	defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
+	for c, ss := range segments {
+		if len(ss) <= 0 {
+			continue
+		}
+		for _, s := range ss {
+			lastIndex := strings.LastIndex(s, "/")
+			if lastIndex <= 0 {
+				lastIndex = 0
+			} else {
+				lastIndex = lastIndex + 1
+			}
+			segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
+			err = f.pacer.Call(func() (bool, error) {
+				var rxHeaders swift.Headers
+				rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
+				copiedSegments = append(copiedSegments, segmentName)
+				return shouldRetryHeaders(ctx, rxHeaders, err)
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+	m := swift.Metadata{}
+	headers := m.ObjectHeaders()
+	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
+	headers["Content-Length"] = "0"
+	emptyReader := bytes.NewReader(nil)
+	err = f.pacer.Call(func() (bool, error) {
+		var rxHeaders swift.Headers
+		rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
+	})
+	return err
+}
+
+//remove copied segments when copy process failed
+func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
+	fs.Debugf(f, "handle copy segment fail")
+	if err == nil {
+		return
+	}
+	if len(segmentsContainer) == 0 {
+		fs.Debugf(f, "invalid segments container")
+		return
+	}
+	if len(segments) == 0 {
+		fs.Debugf(f, "segments is empty")
+		return
+	}
+	fs.Debugf(f, "action delete segments what copied")
+	for _, v := range segments {
+		_ = f.c.ObjectDelete(ctx, segmentsContainer, v)
+	}
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.MD5)
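copyLargeObject above copies each segment under a fresh unique prefix in the destination's "_segments" container and then writes a zero-byte manifest object whose X-Object-Manifest header points at that prefix. A sketch of just the naming and manifest construction (container, path and size are illustrative values):

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

func main() {
	dstContainer := "backup"    // illustrative values only
	dstPath := "video/big.mkv"
	srcSize := int64(5 << 30)

	// Unique prefix for the copied segments, built the same way as in
	// copyLargeObject above: nanoseconds / size / uuid-without-dashes.
	prefix := fmt.Sprintf("%v/%v/%s", time.Now().Nanosecond(), srcSize,
		strings.ReplaceAll(uuid.New().String(), "-", ""))

	// Each copied segment lives in <dstContainer>_segments under
	// <dstPath>/<prefix>/<segment name>.
	segmentsContainer := dstContainer + "_segments"
	segmentName := dstPath + "/" + prefix + "/" + "00000001"
	fmt.Println(segmentsContainer, segmentName)

	// The zero-byte manifest object then points at that prefix via the
	// X-Object-Manifest header (URL-encoded in the real code).
	manifest := fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefix)
	fmt.Println("X-Object-Manifest:", manifest)
}
```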
// Hashes returns the supported hash sets.
|
// Hashes returns the supported hash sets.
|
||||||
func (f *Fs) Hashes() hash.Set {
|
func (f *Fs) Hashes() hash.Set {
|
||||||
return hash.Set(hash.MD5)
|
return hash.Set(hash.MD5)
|
||||||
@@ -1041,7 +1152,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		info, h, err = o.fs.c.Object(ctx, container, containerPath)
-		return shouldRetryHeaders(h, err)
+		return shouldRetryHeaders(ctx, h, err)
 	})
 	if err != nil {
 		if err == swift.ObjectNotFound {
@@ -1100,7 +1211,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	container, containerPath := o.split()
 	return o.fs.pacer.Call(func() (bool, error) {
 		err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 }
 
@@ -1121,7 +1232,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	return
 }
@@ -1211,7 +1322,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	if err == swift.ContainerNotFound {
 		headers := swift.Headers{}
@@ -1220,7 +1331,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 		}
 		err = o.fs.pacer.Call(func() (bool, error) {
 			err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 	}
 	if err != nil {
@@ -1241,7 +1352,8 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 		if segmentInfos == nil || len(segmentInfos) == 0 {
 			return
 		}
-		deleteChunks(ctx, o, segmentsContainer, segmentInfos)
+		_ctx := context.Background()
+		deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
 	})()
 	for {
 		// can we read at least one byte?
@@ -1267,7 +1379,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 			if err == nil {
 				segmentInfos = append(segmentInfos, segmentPath)
 			}
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
 		if err != nil {
 			return "", err
@@ -1281,7 +1393,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 
 	if err == nil {
@@ -1356,7 +1468,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var rxHeaders swift.Headers
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	if err != nil {
 		return err
@@ -1414,7 +1526,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	// Remove file/manifest first
 	err = o.fs.pacer.Call(func() (bool, error) {
 		err = o.fs.c.ObjectDelete(ctx, container, containerPath)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
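Note: these hunks thread the request `context.Context` into the retry predicates so that, once the context is cancelled or times out, the pacer stops retrying instead of looping on a dead request. A minimal standalone sketch of the idea (hypothetical helper, not the rclone implementation):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// shouldRetrySketch illustrates why the context is passed down: once the
// context is cancelled or has expired, the caller should stop retrying and
// surface the context error instead of the transient one.
func shouldRetrySketch(ctx context.Context, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr // never retry after cancellation
	}
	return err != nil, err // otherwise retry on any error (real code is more selective)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(shouldRetrySketch(ctx, errors.New("timeout"))) // true timeout
	cancel()
	fmt.Println(shouldRetrySketch(ctx, errors.New("timeout"))) // false context canceled
}
```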
@@ -1,6 +1,7 @@
 package swift
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -32,6 +33,7 @@ func TestInternalUrlEncode(t *testing.T) {
 }
 
 func TestInternalShouldRetryHeaders(t *testing.T) {
+	ctx := context.Background()
 	headers := swift.Headers{
 		"Content-Length": "64",
 		"Content-Type":   "text/html; charset=UTF-8",
@@ -45,7 +47,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
 
 	// Short sleep should just do the sleep
 	start := time.Now()
-	retry, gotErr := shouldRetryHeaders(headers, err)
+	retry, gotErr := shouldRetryHeaders(ctx, headers, err)
 	dt := time.Since(start)
 	assert.True(t, retry)
 	assert.Equal(t, err, gotErr)
@@ -54,7 +56,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
 	// Long sleep should return RetryError
 	headers["Retry-After"] = "3600"
 	start = time.Now()
-	retry, gotErr = shouldRetryHeaders(headers, err)
+	retry, gotErr = shouldRetryHeaders(ctx, headers, err)
 	dt = time.Since(start)
 	assert.True(t, dt < time.Second)
 	assert.False(t, retry)
@@ -80,6 +80,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("NoChunk", f.testNoChunk)
 	t.Run("WithChunk", f.testWithChunk)
 	t.Run("WithChunkFail", f.testWithChunkFail)
+	t.Run("CopyLargeObject", f.testCopyLargeObject)
 }
 
 func (f *Fs) testWithChunk(t *testing.T) {
@@ -154,4 +155,39 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 	require.Empty(t, objs)
 }
 
+func (f *Fs) testCopyLargeObject(t *testing.T) {
+	preConfChunkSize := f.opt.ChunkSize
+	preConfChunk := f.opt.NoChunk
+	f.opt.NoChunk = false
+	f.opt.ChunkSize = 1024 * fs.Byte
+	defer func() {
+		//restore old config after test
+		f.opt.ChunkSize = preConfChunkSize
+		f.opt.NoChunk = preConfChunk
+	}()
+
+	file := fstest.Item{
+		ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
+		Path:    "large.txt",
+		Size:    -1, // use unknown size during upload
+	}
+	const contentSize = 2048
+	contents := random.String(contentSize)
+	buf := bytes.NewBufferString(contents)
+	uploadHash := hash.NewMultiHasher()
+	in := io.TeeReader(buf, uploadHash)
+
+	file.Size = -1
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	ctx := context.TODO()
+	obj, err := f.Features().PutStream(ctx, in, obji)
+	require.NoError(t, err)
+	require.NotEmpty(t, obj)
+	remoteTarget := "large.txt (copy)"
+	objTarget, err := f.Features().Copy(ctx, obj, remoteTarget)
+	require.NoError(t, err)
+	require.NotEmpty(t, objTarget)
+	require.Equal(t, obj.Size(), objTarget.Size())
+}
+
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -59,9 +59,19 @@ func (d *Directory) candidates() []upstream.Entry {
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	entries, err := o.fs.actionEntries(o.candidates()...)
+	if err == fs.ErrorPermissionDenied {
+		// There are no candidates in this object which can be written to
+		// So attempt to create a new object instead
+		newO, err := o.fs.put(ctx, in, src, false, options...)
 		if err != nil {
 			return err
 		}
+		// Update current object
+		*o = *newO.(*Object)
+		return nil
+	} else if err != nil {
+		return err
+	}
 	if len(entries) == 1 {
 		obj := entries[0].(*upstream.Object)
 		return obj.Update(ctx, in, src, options...)
@@ -17,7 +17,9 @@ func init() {
 type EpFF struct{}
 
 func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
-	ch := make(chan *upstream.Fs)
+	ch := make(chan *upstream.Fs, len(upstreams))
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	for _, u := range upstreams {
 		u := u // Closure
 		go func() {
@@ -30,16 +32,10 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 		}()
 	}
 	var u *upstream.Fs
-	for i := 0; i < len(upstreams); i++ {
+	for range upstreams {
 		u = <-ch
 		if u != nil {
-			// close remaining goroutines
-			go func(num int) {
-				defer close(ch)
-				for i := 0; i < num; i++ {
-					<-ch
-				}
-			}(len(upstreams) - 1 - i)
+			break
 		}
 	}
 	if u == nil {
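Note: the policy change above swaps the drain-goroutine cleanup for a buffered result channel plus `context.WithCancel`, so losing lookups can always send without blocking and are told to stop early. A standalone sketch of the same pattern (illustrative names and timings, not the rclone code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// firstHit returns the index of the first worker that reports a result.
// The channel is buffered to len(workers) so late senders never block (and
// never leak), and the deferred cancel tells remaining workers to give up.
func firstHit(ctx context.Context, workers []time.Duration) int {
	ch := make(chan int, len(workers)) // buffered: no goroutine leaks
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	for i, d := range workers {
		i, d := i, d // per-iteration copies for the closure
		go func() {
			select {
			case <-time.After(d): // simulate a lookup that takes d
				ch <- i
			case <-ctx.Done(): // cancelled: report "no result"
				ch <- -1
			}
		}()
	}
	for range workers {
		if idx := <-ch; idx >= 0 {
			return idx // first result wins; deferred cancel stops the rest
		}
	}
	return -1
}

func main() {
	fmt.Println(firstHit(context.Background(),
		[]time.Duration{30 * time.Millisecond, 10 * time.Millisecond, 20 * time.Millisecond}))
}
```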
 backend/union/union_internal_test.go | 67 (new file)
@@ -0,0 +1,67 @@
+package union
+
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func (f *Fs) TestInternalReadOnly(t *testing.T) {
+	if f.name != "TestUnionRO" {
+		t.Skip("Only on RO union")
+	}
+	dir := "TestInternalReadOnly"
+	ctx := context.Background()
+	rofs := f.upstreams[len(f.upstreams)-1]
+	assert.False(t, rofs.IsWritable())
+
+	// Put a file onto the read only fs
+	contents := random.String(50)
+	file1 := fstest.NewItem(dir+"/file.txt", contents, time.Now())
+	_, obj1 := fstests.PutTestContents(ctx, t, rofs, &file1, contents, true)
+
+	// Check read from readonly fs via union
+	o, err := f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(50), o.Size())
+
+	// Now call Update on the union Object with new data
+	contents2 := random.String(100)
+	file2 := fstest.NewItem(dir+"/file.txt", contents2, time.Now())
+	in := bytes.NewBufferString(contents2)
+	src := object.NewStaticObjectInfo(file2.Path, file2.ModTime, file2.Size, true, nil, nil)
+	err = o.Update(ctx, in, src)
+	require.NoError(t, err)
+	assert.Equal(t, int64(100), o.Size())
+
+	// Check we read the new object via the union
+	o, err = f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(100), o.Size())
+
+	// Remove the object
+	assert.NoError(t, o.Remove(ctx))
+
+	// Check we read the old object in the read only layer now
+	o, err = f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(50), o.Size())
+
+	// Remove file and dir from read only fs
+	assert.NoError(t, obj1.Remove(ctx))
+	assert.NoError(t, rofs.Rmdir(ctx, dir))
+}
+
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("ReadOnly", f.TestInternalReadOnly)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
@@ -2,13 +2,15 @@
 package union_test
 
 import (
+	"fmt"
+	"io/ioutil"
 	"os"
-	"path/filepath"
 	"testing"
 
 	_ "github.com/rclone/rclone/backend/local"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -24,17 +26,28 @@ func TestIntegration(t *testing.T) {
 	})
 }
 
+func makeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
+	for i := 1; i <= n; i++ {
+		dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
+		require.NoError(t, err)
+		dirs = append(dirs, dir)
+	}
+	clean = func() {
+		for _, dir := range dirs {
+			err := os.RemoveAll(dir)
+			assert.NoError(t, err)
+		}
+	}
+	return dirs, clean
+}
+
 func TestStandard(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
 	name := "TestUnion"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -54,13 +67,9 @@ func TestRO(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
 	name := "TestUnionRO"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -80,13 +89,9 @@ func TestNC(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
 	name := "TestUnionNC"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -106,13 +111,9 @@ func TestPolicy1(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
 	name := "TestUnionPolicy1"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -132,13 +133,9 @@ func TestPolicy2(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
 	name := "TestUnionPolicy2"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -158,13 +155,9 @@ func TestPolicy3(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
-	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
-	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
-	require.NoError(t, os.MkdirAll(tempdir1, 0744))
-	require.NoError(t, os.MkdirAll(tempdir2, 0744))
-	require.NoError(t, os.MkdirAll(tempdir3, 0744))
-	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
+	dirs, clean := makeTestDirs(t, 3)
+	defer clean()
+	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
 	name := "TestUnionPolicy3"
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: name + ":",
@@ -14,6 +14,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
+	"github.com/rclone/rclone/fs/fspath"
 )
 
 var (
@@ -62,7 +63,7 @@ type Entry interface {
 // New creates a new Fs based on the
 // string formatted `type:root_path(:ro/:nc)`
 func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs, error) {
-	_, configName, fsPath, err := fs.ParseRemote(remote)
+	configName, fsPath, err := fspath.SplitFs(remote)
 	if err != nil {
 		return nil, err
 	}
@@ -83,15 +84,13 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
 		f.creatable = false
 		fsPath = fsPath[0 : len(fsPath)-3]
 	}
-	if configName != "local" {
-		fsPath = configName + ":" + fsPath
-	}
-	rFs, err := cache.Get(ctx, fsPath)
+	remote = configName + fsPath
+	rFs, err := cache.Get(ctx, remote)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
 	}
 	f.RootFs = rFs
-	rootString := path.Join(fsPath, filepath.ToSlash(root))
+	rootString := path.Join(remote, filepath.ToSlash(root))
 	myFs, err := cache.Get(ctx, rootString)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
@@ -36,8 +36,8 @@ import (
 )
 
 const (
-	rcloneClientID              = "1000.OZNFWW075EKDSIE1R42HI9I2SUPC9A"
-	rcloneEncryptedClientSecret = "rn7myzbsYK3WlqO2EU6jU8wmj0ylsx7_1B5wvSaVncYbu1Wt0QxPW9FFbidjqAZtyxnBenYIWq1pcA"
+	rcloneClientID              = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
+	rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
 	minSleep                    = 10 * time.Millisecond
 	maxSleep                    = 2 * time.Second
 	decayConstant               = 2 // bigger for slower decay, exponential
@@ -96,6 +96,11 @@ func init() {
 			log.Fatalf("Failed to configure token: %v", err)
 		}
 	}
+
+	if fs.GetConfig(ctx).AutoConfirm {
+		return
+	}
+
 	if err = setupRoot(ctx, name, m); err != nil {
 		log.Fatalf("Failed to configure root directory: %v", err)
 	}
@@ -161,7 +166,7 @@ type Object struct {
 
 func setupRegion(m configmap.Mapper) {
 	region, ok := m.Get("region")
-	if !ok {
+	if !ok || region == "" {
 		log.Fatalf("No region set\n")
 	}
 	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
@@ -372,6 +377,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err := configstruct.Set(m, opt); err != nil {
 		return nil, err
 	}
+	setupRegion(m)
 
 	root = parsePath(root)
 	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
@@ -646,7 +652,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
 	params.Set("filename", name)
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
-	formReader, contentType, overhead, err := rest.MultipartUpload(in, nil, "content", name)
+	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make multipart upload")
 	}
@@ -161,15 +161,7 @@ func buildZip(dir string) string {
 // Build .deb and .rpm packages
 //
 // It returns a list of artifacts it has made
-func buildDebAndRpm(dir, version, goarchBuild string) []string {
-	goarch := stripVersion(goarchBuild)
-
-	// Base ARM build we will mark as "arm5" so nfpm puts the
-	// architecture in as armel not armhf
-	if goarchBuild == "arm" {
-		goarch = "arm5"
-	}
-
+func buildDebAndRpm(dir, version, goarch string) []string {
 	// Make internal version number acceptable to .deb and .rpm
 	pkgVersion := version[1:]
 	pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
@@ -384,7 +376,7 @@ func compileArch(version, goos, goarch, dir string) bool {
 	artifacts := []string{buildZip(dir)}
 	// build a .deb and .rpm if appropriate
 	if goos == "linux" {
-		artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
+		artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
 	}
 	if *copyAs != "" {
 		for _, artifact := range artifacts {
 cmd/cmd.go | 24
@@ -36,6 +36,7 @@ import (
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/fs/rc/rcserver"
 	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/terminal"
 	"github.com/spf13/cobra"
@@ -74,9 +75,24 @@ const (
 
 // ShowVersion prints the version to stdout
 func ShowVersion() {
+	osVersion, osKernel := buildinfo.GetOSVersion()
+	if osVersion == "" {
+		osVersion = "unknown"
+	}
+	if osKernel == "" {
+		osKernel = "unknown"
+	}
+
+	linking, tagString := buildinfo.GetLinkingAndTags()
+
 	fmt.Printf("rclone %s\n", fs.Version)
-	fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
-	fmt.Printf("- go version: %s\n", runtime.Version())
+	fmt.Printf("- os/version: %s\n", osVersion)
+	fmt.Printf("- os/kernel: %s\n", osKernel)
+	fmt.Printf("- os/type: %s\n", runtime.GOOS)
+	fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
+	fmt.Printf("- go/version: %s\n", runtime.Version())
+	fmt.Printf("- go/linking: %s\n", linking)
+	fmt.Printf("- go/tags: %s\n", tagString)
 }
 
 // NewFsFile creates an Fs from a name but may point to a file.
@@ -84,7 +100,7 @@ func ShowVersion() {
 // It returns a string with the file name if points to a file
 // otherwise "".
 func NewFsFile(remote string) (fs.Fs, string) {
-	_, _, fsPath, err := fs.ParseRemote(remote)
+	_, fsPath, err := fspath.SplitFs(remote)
 	if err != nil {
 		err = fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
@@ -548,7 +564,7 @@ func Main() {
 	setupRootCommand(Root)
 	AddBackendFlags()
 	if err := Root.Execute(); err != nil {
-		if strings.HasPrefix(err.Error(), "unknown command") {
+		if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
 			Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
 		}
 		log.Fatalf("Fatal error: %v", err)
 cmd/cmount/arch.go | 7 (new file)
@@ -0,0 +1,7 @@
+package cmount
+
+// ProvidedBy returns true if the rclone build for the given OS
+// provides support for lib/cgo-fuse
+func ProvidedBy(osName string) bool {
+	return osName == "windows" || osName == "darwin"
+}
@@ -21,12 +21,13 @@ import (
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/vfs"
 )
 
 func init() {
 	name := "cmount"
-	cmountOnly := runtime.GOOS == "windows" || runtime.GOOS == "darwin"
+	cmountOnly := ProvidedBy(runtime.GOOS)
 	if cmountOnly {
 		name = "mount"
 	}
@@ -35,6 +36,7 @@ func init() {
 		cmd.Aliases = append(cmd.Aliases, "cmount")
 	}
 	mountlib.AddRc("cmount", mount)
+	buildinfo.Tags = append(buildinfo.Tags, "cmount")
 }
 
 // Find the option string in the current options
@@ -3,6 +3,7 @@ package copyurl
 import (
 	"context"
 	"errors"
+	"fmt"
 	"os"
 
 	"github.com/rclone/rclone/cmd"
@@ -14,6 +15,7 @@ import (
 
 var (
 	autoFilename  = false
+	printFilename = false
 	stdout        = false
 	noClobber     = false
 )
@@ -22,6 +24,7 @@ func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
+	flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename")
 	flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
 	flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
 }
@@ -33,15 +36,16 @@ var commandDefinition = &cobra.Command{
 Download a URL's content and copy it to the destination without saving
 it in temporary storage.
 
-Setting --auto-filename will cause the file name to be retrieved from
+Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
 the from URL (after any redirections) and used in the destination
-path.
+path. With ` + "`--print-filename`" + ` in addition, the resuling file name will
+be printed.
 
-Setting --no-clobber will prevent overwriting file on the
+Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the
 destination if there is one with the same name.
 
-Setting --stdout or making the output file name "-" will cause the
-output to be written to standard output.
+Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
+will cause the output to be written to standard output.
 `,
 	RunE: func(command *cobra.Command, args []string) (err error) {
 		cmd.CheckArgs(1, 2, command, args)
@@ -61,10 +65,14 @@ output to be written to standard output.
 			}
 		}
 		cmd.Run(true, true, command, func() error {
+			var dst fs.Object
 			if stdout {
 				err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
 			} else {
-				_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
+				dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
+				if printFilename && err == nil && dst != nil {
+					fmt.Println(dst.Remote())
+				}
 			}
 			return err
 		})
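Note: a possible invocation combining the flags described in the help text above (URL, remote and path are illustrative):

    rclone copyurl -a -p https://example.com/image.jpg remote:path/to/dir

With `-a`/`--auto-filename` the destination file name is taken from the URL, and `-p`/`--print-filename` then prints the name that was chosen.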
@@ -3,7 +3,6 @@ package link
 import (
 	"context"
 	"fmt"
-	"time"
 
 	"github.com/rclone/rclone/cmd"
 	"github.com/rclone/rclone/fs"
@@ -13,7 +12,7 @@ import (
 )
 
 var (
-	expire = fs.Duration(time.Hour * 24 * 365 * 100)
+	expire = fs.DurationOff
 	unlink = false
 )
 
@@ -181,6 +181,15 @@ func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
 	return nil
 }
 
+// Invalidate a leaf in a directory
+func (d *Dir) invalidateEntry(dirNode fusefs.Node, leaf string) {
+	fs.Debugf(dirNode, "Invalidating %q", leaf)
+	err := d.fsys.server.InvalidateEntry(dirNode, leaf)
+	if err != nil {
+		fs.Debugf(dirNode, "Failed to invalidate %q: %v", leaf, err)
+	}
+}
+
 // Check interface satisfied
 var _ fusefs.NodeRenamer = (*Dir)(nil)
 
@@ -197,6 +206,13 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
 		return translateError(err)
 	}
 
+	// Invalidate the new directory entry so it gets re-read (in
+	// the background otherwise we cause a deadlock)
+	//
+	// See https://github.com/rclone/rclone/issues/4977 for why
+	go d.invalidateEntry(newDir, req.NewName)
+	//go d.invalidateEntry(d, req.OldName)
+
 	return nil
 }
 
@@ -22,6 +22,7 @@ type FS struct {
 	*vfs.VFS
 	f      fs.Fs
 	opt    *mountlib.Options
+	server *fusefs.Server
 }
 
 // Check interface satisfied
@@ -91,12 +91,12 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	}
 
 	filesys := NewFS(VFS, opt)
-	server := fusefs.New(c, nil)
+	filesys.server = fusefs.New(c, nil)
 
 	// Serve the mount point in the background returning error to errChan
 	errChan := make(chan error, 1)
 	go func() {
-		err := server.Serve(filesys)
+		err := filesys.server.Serve(filesys)
 		closeErr := c.Close()
 		if err == nil {
 			err = closeErr
@@ -334,7 +334,7 @@ metadata about files like in UNIX. One case that may arise is that other program
 (incorrectly) interprets this as the file being accessible by everyone. For example
 an SSH client may warn about "unprotected private key file".
 
-WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
+WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
 that allows the complete specification of file security descriptors using
 [SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
 With this you can work around issues such as the mentioned "unprotected private key file"
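Note: as an illustration only (the SDDL string and the exact option syntax should be checked against the WinFsp documentation), a mount that exposes files with full access for the owner alone might pass the option like this:

    rclone mount remote:path X: -o FileSecurity="D:P(A;;FA;;;OW)"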
@@ -1,3 +1,5 @@
+// +build !noselfupdate
+
 package selfupdate
 
 // Note: "|" will be replaced by backticks in the help string below
@@ -6,10 +8,30 @@ This command downloads the latest release of rclone and replaces
 the currently running binary. The download is verified with a hashsum
 and cryptographically signed signature.
 
-The |--version VER| flag, if given, will update to a concrete version
+If used without flags (or with implied |--stable| flag), this command
+will install the latest stable release. However, some issues may be fixed
+(or features added) only in the latest beta release. In such cases you should
+run the command with the |--beta| flag, i.e. |rclone selfupdate --beta|.
+You can check in advance what version would be installed by adding the
+|--check| flag, then repeat the command without it when you are satisfied.
+
+Sometimes the rclone team may recommend you a concrete beta or stable
+rclone release to troubleshoot your issue or add a bleeding edge feature.
+The |--version VER| flag, if given, will update to the concrete version
 instead of the latest one. If you omit micro version from |VER| (for
 example |1.53|), the latest matching micro version will be used.
 
+Upon successful update rclone will print a message that contains a previous
+version number. You will need it if you later decide to revert your update
+for some reason. Then you'll have to note the previous version and run the
+following command: |rclone selfupdate [--beta] OLDVER|.
+If the old version contains only dots and digits (for example |v1.54.0|)
+then it's a stable release so you won't need the |--beta| flag. Beta releases
+have an additional information similar to |v1.54.0-beta.5111.06f1c0c61|.
+(if you are a developer and use a locally built rclone, the version number
+will end with |-DEV|, you will have to rebuild it as it obviously can't
+be distributed).
+
 If you previously installed rclone via a package manager, the package may
 include local documentation or configure services. You may wish to update
 with the flag |--package deb| or |--package rpm| (whichever is correct for
@@ -17,6 +39,15 @@ your OS) to update these too. This command with the default |--package zip|
 will update only the rclone executable so the local manual may become
 inaccurate after it.
 
+The |rclone mount| command (https://rclone.org/commands/rclone_mount/) may
+or may not support extended FUSE options depending on the build and OS.
+|selfupdate| will refuse to update if the capability would be discarded.
+
 Note: Windows forbids deletion of a currently running executable so this
 command will rename the old executable to 'rclone.old.exe' upon success.
+
+Please note that this command was not available before rclone version 1.55.
+If it fails for you with the message |unknown command "selfupdate"| then
+you will need to update manually following the install instructions located
+at https://rclone.org/install/
 `
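Note: taken together, the flows described in this help text look roughly like the following (version number illustrative):

    rclone selfupdate --check      # preview which version would be installed
    rclone selfupdate              # install the latest stable release
    rclone selfupdate --beta       # install the latest beta instead
    rclone selfupdate 1.54.0       # revert to a previously noted version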
 cmd/selfupdate/noselfupdate.go | 11 (new file)
@@ -0,0 +1,11 @@
+// +build noselfupdate
+
+package selfupdate
+
+import (
+	"github.com/rclone/rclone/lib/buildinfo"
+)
+
+func init() {
+	buildinfo.Tags = append(buildinfo.Tags, "noselfupdate")
+}
@@ -1,3 +1,5 @@
+// +build !noselfupdate
+
 package selfupdate
 
 import (
@@ -21,9 +23,11 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/cmd/cmount"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/spf13/cobra"
 
@@ -141,20 +145,19 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
 		return errors.New("--stable and --beta are mutually exclusive")
 	}
 
+	// The `cmount` tag is added by cmd/cmount/mount.go only if build is static.
+	_, tags := buildinfo.GetLinkingAndTags()
+	if strings.Contains(" "+tags+" ", " cmount ") && !cmount.ProvidedBy(runtime.GOOS) {
+		return errors.New("updating would discard the mount FUSE capability, aborting")
+	}
+
 	newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version)
 	if err != nil {
 		return errors.Wrap(err, "unable to detect new version")
 	}
 
-	if newVersion == "" {
-		var err error
-		_, newVersion, _, err = versionCmd.GetVersion(siteURL + "/version.txt")
-		if err != nil {
-			return errors.Wrap(err, "unable to detect new version")
-		}
-	}
-
-	if newVersion == fs.Version {
+	oldVersion := fs.Version
+	if newVersion == oldVersion {
 		fmt.Println("rclone is up to date")
 		return nil
 	}
@@ -166,7 +169,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
 	} else {
 		err := installPackage(ctx, opt.Beta, newVersion, siteURL, opt.Package)
 		if err == nil {
-			fmt.Printf("Successfully updated rclone package to version %s\n", newVersion)
+			fmt.Printf("Successfully updated rclone package from version %s to version %s\n", oldVersion, newVersion)
 		}
 		return err
 	}
@@ -218,7 +221,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
 
 	err = replaceExecutable(targetFile, newFile, savedFile)
 	if err == nil {
-		fmt.Printf("Successfully updated rclone to version %s\n", newVersion)
+		fmt.Printf("Successfully updated rclone from version %s to version %s\n", oldVersion, newVersion)
 	}
 	return err
 }
@@ -1,3 +1,5 @@
+// +build !noselfupdate
+
 package selfupdate
 
 import (
@@ -1,3 +1,5 @@
+// +build !noselfupdate
+
 package selfupdate
 
 import (
@@ -1,4 +1,5 @@
 // +build !windows,!plan9,!js
+// +build !noselfupdate
 
 package selfupdate
 
@@ -1,4 +1,5 @@
 // +build plan9 js
+// +build !noselfupdate
 
 package selfupdate
 
@@ -1,4 +1,5 @@
 // +build windows
+// +build !noselfupdate
 
 package selfupdate
 
 cmd/selfupdate_disabled.go | 5 (new file)
@@ -0,0 +1,5 @@
+// +build noselfupdate
+
+package cmd
+
+const selfupdateEnabled = false

 cmd/selfupdate_enabled.go | 7 (new file)
@@ -0,0 +1,7 @@
+// +build !noselfupdate
+
+package cmd
+
+// This constant must be in the `cmd` package rather than `cmd/selfupdate`
+// to prevent build failure due to dependency loop.
+const selfupdateEnabled = true
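Note: the `noselfupdate` build tag that ties these files together is applied with the standard Go build machinery, for example (illustrative, from the repository root):

    go build -tags noselfupdate

which compiles the `noselfupdate` variants above and drops the selfupdate command entirely.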
@@ -29,14 +29,24 @@ var commandDefinition = &cobra.Command{
 	Use:   "version",
 	Short: `Show the version number.`,
 	Long: `
-Show the version number, the go version and the architecture.
+Show the rclone version number, the go version, the build target
+OS and architecture, the runtime OS and kernel version and bitness,
+build tags and the type of executable (static or dynamic).
 
-Eg
+For example:
 
     $ rclone version
-    rclone v1.41
-    - os/arch: linux/amd64
-    - go version: go1.10
+    rclone v1.55.0
+    - os/version: ubuntu 18.04 (64 bit)
+    - os/kernel: 4.15.0-136-generic (x86_64)
+    - os/type: linux
+    - os/arch: amd64
+    - go/version: go1.16
+    - go/linking: static
+    - go/tags: none
+
+Note: before rclone version 1.55 the os/type and os/arch lines were merged,
+and the "go/version" line was tagged as "go version".
 
 If you supply the --check flag, then it will do an online check to
 compare your version with the latest release and the latest beta.
@@ -89,9 +99,7 @@ func GetVersion(url string) (v *semver.Version, vs string, date time.Time, err e
 		return v, vs, date, err
 	}
 	vs = strings.TrimSpace(string(bodyBytes))
-	if strings.HasPrefix(vs, "rclone ") {
-		vs = vs[7:]
-	}
+	vs = strings.TrimPrefix(vs, "rclone ")
 	vs = strings.TrimRight(vs, "β")
 	date, err = http.ParseTime(resp.Header.Get("Last-Modified"))
 	if err != nil {
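Note: the `GetVersion` simplification above relies on `strings.TrimPrefix` being a no-op when the prefix is absent, so the previous `HasPrefix` guard and manual slice were redundant. A quick self-contained illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Prefix present: it is stripped.
	fmt.Println(strings.TrimPrefix("rclone v1.55.0", "rclone ")) // "v1.55.0"
	// Prefix absent: the string is returned unchanged.
	fmt.Println(strings.TrimPrefix("v1.55.0", "rclone ")) // "v1.55.0"
}
```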
@@ -475,3 +475,7 @@ put them back in again.` >}}
 * Maxwell Calman <mcalman@MacBook-Pro.local>
 * Naveen Honest Raj <naveendurai19@gmail.com>
 * Lucas Messenger <lmesseng@cisco.com>
+* Manish Kumar <krmanish260@gmail.com>
+* x0b <x0bdev@gmail.com>
+* CERN through the CS3MESH4EOSC Project
+* Nick Gaya <nicholasgaya+github@gmail.com>
@@ -392,6 +392,22 @@ See: the [encoding section in the overview](/overview/#encoding) for more info.
 - Type: MultiEncoder
 - Default: Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8
 
+#### --azureblob-public-access
+
+Public access level of a container: blob, container.
+
+- Config: public_access
+- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
+- Type: string
+- Default: ""
+- Examples:
+    - ""
+        - The container and its blobs can be accessed only with an authorized request. It's a default value
+    - "blob"
+        - Blob data within this container can be read via anonymous request.
+    - "container"
+        - Allow full public read access for container and blob data.
+
 {{< rem autogenerated options stop >}}
 ### Limitations ###
 
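Note: a remote using the new option could be configured along these lines (remote name, account and key are placeholders):

    [azblob]
    type = azureblob
    account = myaccount
    key = <storage account key>
    public_access = blob

The same value can also be supplied through the documented environment variable, e.g. `RCLONE_AZUREBLOB_PUBLIC_ACCESS=container`.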
@@ -5,6 +5,160 @@ description: "Rclone Changelog"

# Changelog

## v1.55.0 - 2021-03-31

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.55.0)

* New commands
    * [selfupdate](/commands/rclone_selfupdate/) (Ivan Andreev)
        * Allows rclone to update itself in-place or via a package (using `--package` flag)
        * Reads cryptographically signed signatures for non beta releases
        * Works on all OSes.
    * [test](/commands/rclone_test/) - these are test commands - use with care!
        * `histogram` - Makes a histogram of file name characters.
        * `info` - Discovers file name or other limitations for paths.
        * `makefiles` - Make a random file hierarchy for testing.
        * `memory` - Load all the objects at remote:path into memory and report memory stats.
* New Features
    * [Connection strings](/docs/#connection-strings)
        * Config parameters can now be passed as part of the remote name as a connection string.
        * For example to do the equivalent of `--drive-shared-with-me` use `drive,shared_with_me:`
        * Make sure we don't save on the fly remote config to the config file (Nick Craig-Wood)
        * Make sure backends with additional config have a different name for caching (Nick Craig-Wood)
        * This work was sponsored by CERN, through the [CS3MESH4EOSC Project](https://cs3mesh4eosc.eu/).
            * CS3MESH4EOSC has received funding from the European Union's Horizon 2020 research and innovation programme under Grant Agreement no. 863353.
    * build
        * Update go build version to go1.16 and raise minimum go version to go1.13 (Nick Craig-Wood)
        * Make a macOS ARM64 build to support Apple Silicon (Nick Craig-Wood)
        * Install macfuse 4.x instead of osxfuse 3.x (Nick Craig-Wood)
        * Use `GO386=softfloat` instead of deprecated `GO386=387` for 386 builds (Nick Craig-Wood)
        * Disable IOS builds for the time being (Nick Craig-Wood)
        * Android builds made with up to date NDK (x0b)
        * Add an rclone user to the Docker image but don't use it by default (cynthia kwok)
    * dedupe: Make largest directory primary to minimize data moved (Saksham Khanna)
    * config
        * Wrap config library in an interface (Fionera)
        * Make config file system pluggable (Nick Craig-Wood)
        * `--config ""` or `"/notfound"` for in memory config only (Nick Craig-Wood)
        * Clear fs cache of stale entries when altering config (Nick Craig-Wood)
    * copyurl: Add option to print resulting auto-filename (albertony)
    * delete: Make `--rmdirs` obey the filters (Nick Craig-Wood)
    * docs - many fixes and reworks from edwardxml, albertony, pvalls, Ivan Andreev, Evan Harris, buengese, Alexey Tabakman
    * encoder/filename - add SCSU as tables (Klaus Post)
    * Add multiple paths support to `--compare-dest` and `--copy-dest` flag (K265)
    * filter: Make `--exclude "dir/"` equivalent to `--exclude "dir/**"` (Nick Craig-Wood)
    * fshttp: Add DSCP support with `--dscp` for QoS with differentiated services (Max Sum)
    * lib/cache: Add Delete and DeletePrefix methods (Nick Craig-Wood)
    * lib/file
        * Make pre-allocate detect disk full errors and return them (Nick Craig-Wood)
        * Don't run preallocate concurrently (Nick Craig-Wood)
        * Retry preallocate on EINTR (Nick Craig-Wood)
    * operations: Made copy and sync operations obey a RetryAfterError (Ankur Gupta)
    * rc
        * Add string alternatives for setting options over the rc (Nick Craig-Wood)
        * Add `options/local` to see the options configured in the context (Nick Craig-Wood)
        * Add `_config` parameter to set global config for just this rc call (Nick Craig-Wood)
        * Implement passing filter config with `_filter` parameter (Nick Craig-Wood)
        * Add `fscache/clear` and `fscache/entries` to control the fs cache (Nick Craig-Wood)
        * Avoid +Inf value for speed in `core/stats` (albertony)
        * Add a full set of stats to `core/stats` (Nick Craig-Wood)
        * Allow `fs=` params to be a JSON blob (Nick Craig-Wood)
    * rcd: Added systemd notification during the `rclone rcd` command. (Naveen Honest Raj)
    * rmdirs: Make `--rmdirs` obey the filters (Nick Craig-Wood)
    * version: Show build tags and type of executable (Ivan Andreev)
* Bug Fixes
    * install.sh: make it fail on download errors (Ivan Andreev)
    * Fix excessive retries missing `--max-duration` timeout (Nick Craig-Wood)
    * Fix crash when `--low-level-retries=0` (Nick Craig-Wood)
    * Fix failed token refresh on mounts created via the rc (Nick Craig-Wood)
    * fshttp: Fix bandwidth limiting after bad merge (Nick Craig-Wood)
    * lib/atexit
        * Unregister interrupt handler once it has fired so users can interrupt again (Nick Craig-Wood)
        * Fix occasional failure to unmount with CTRL-C (Nick Craig-Wood)
        * Fix deadlock calling Finalise while Run is running (Nick Craig-Wood)
    * lib/rest: Fix multipart uploads not stopping on context cancel (Nick Craig-Wood)
* Mount
    * Allow mounting to root directory on windows (albertony)
    * Improved handling of relative paths on windows (albertony)
    * Fix unicode issues with accented characters on macOS (Nick Craig-Wood)
    * Docs: document the new FileSecurity option in WinFsp 2021 (albertony)
    * Docs: add note about volume path syntax on windows (albertony)
    * Fix caching of old directories after renaming them (Nick Craig-Wood)
    * Update cgofuse to the latest version to bring in macfuse 4 fix (Nick Craig-Wood)
* VFS
    * `--vfs-used-is-size` to report used space using recursive scan (tYYGH)
    * Don't set modification time if it was already correct (Nick Craig-Wood)
    * Fix Create causing windows explorer to truncate files on CTRL-C CTRL-V (Nick Craig-Wood)
    * Fix modtimes not updating when writing via cache (Nick Craig-Wood)
    * Fix modtimes changing by fractional seconds after upload (Nick Craig-Wood)
    * Fix modtime set if `--vfs-cache-mode writes`/`full` and no write (Nick Craig-Wood)
    * Rename files in cache and cancel uploads on directory rename (Nick Craig-Wood)
    * Fix directory renaming by renaming dirs cached in memory (Nick Craig-Wood)
* Local
    * Add flag `--local-no-preallocate` (David Sze)
    * Make `nounc` an advanced option except on Windows (albertony)
    * Don't ignore preallocate disk full errors (Nick Craig-Wood)
* Cache
    * Add `--fs-cache-expire-duration` to control the fs cache (Nick Craig-Wood)
* Crypt
    * Add option to not encrypt data (Vesnyx)
    * Log hash ok on upload (albertony)
* Azure Blob
    * Add container public access level support. (Manish Kumar)
* B2
    * Fix HTML files downloaded via cloudflare (Nick Craig-Wood)
* Box
    * Fix transfers getting stuck on token expiry after API change (Nick Craig-Wood)
* Chunker
    * Partially implement no-rename transactions (Maxwell Calman)
* Drive
    * Don't stop server side copy if couldn't read description (Nick Craig-Wood)
    * Pass context on to drive SDK - to help with cancellation (Nick Craig-Wood)
* Dropbox
    * Add polling for changes support (Robert Thomas)
    * Make `--timeout 0` work properly (Nick Craig-Wood)
    * Raise priority of rate limited message to INFO to make it more noticeable (Nick Craig-Wood)
* Fichier
    * Implement copy & move (buengese)
    * Implement public link (buengese)
* FTP
    * Implement Shutdown method (Nick Craig-Wood)
    * Close idle connections after `--ftp-idle-timeout` (1m by default) (Nick Craig-Wood)
    * Make `--timeout 0` work properly (Nick Craig-Wood)
    * Add `--ftp-close-timeout` flag for use with awkward ftp servers (Nick Craig-Wood)
    * Retry connections and logins on 421 errors (Nick Craig-Wood)
* Hdfs
    * Fix permissions for when directory is created (Lucas Messenger)
* Onedrive
    * Make `--timeout 0` work properly (Nick Craig-Wood)
* S3
    * Fix `--s3-profile` which wasn't working (Nick Craig-Wood)
* SFTP
    * Close idle connections after `--sftp-idle-timeout` (1m by default) (Nick Craig-Wood)
    * Fix "file not found" errors for read once servers (Nick Craig-Wood)
    * Fix SetModTime stat failed: object not found with `--sftp-set-modtime=false` (Nick Craig-Wood)
* Swift
    * Update github.com/ncw/swift to v2.0.0 (Nick Craig-Wood)
    * Implement copying large objects (nguyenhuuluan434)
* Union
    * Fix crash when using epff policy (Nick Craig-Wood)
    * Fix union attempting to update files on a read only file system (Nick Craig-Wood)
    * Refactor to use fspath.SplitFs instead of fs.ParseRemote (Nick Craig-Wood)
    * Fix initialisation broken in refactor (Nick Craig-Wood)
* WebDAV
    * Add support for sharepoint with NTLM authentication (Rauno Ots)
    * Make sharepoint-ntlm docs more consistent (Alex Chen)
    * Improve terminology in sharepoint-ntlm docs (Ivan Andreev)
    * Disable HTTP/2 for NTLM authentication (georne)
    * Fix sharepoint-ntlm error 401 for parallel actions (Ivan Andreev)
    * Check that purged directory really exists (Ivan Andreev)
* Yandex
    * Make `--timeout 0` work properly (Nick Craig-Wood)
* Zoho
    * Replace client id - you will need to `rclone config reconnect` after this (buengese)
    * Add forgotten setupRegion() to NewFs - this finally fixes regions other than EU (buengese)

## v1.54.1 - 2021-03-08

[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1)

@@ -416,4 +416,27 @@ Choose how chunker should handle files with missing or invalid chunks.

    - "false"
        - Warn user, skip incomplete file and proceed.

#### --chunker-transactions

Choose how chunker should handle temporary files during transactions.

- Config: transactions
- Env Var: RCLONE_CHUNKER_TRANSACTIONS
- Type: string
- Default: "rename"
- Examples:
    - "rename"
        - Rename temporary files after a successful transaction.
    - "norename"
        - Leave temporary file names and write transaction ID to metadata file.
        - Metadata is required for no rename transactions (meta format cannot be "none").
        - If you are using norename transactions you should be careful not to downgrade Rclone,
          as older versions of Rclone don't support this transaction style and will misinterpret
          files manipulated by norename transactions.
        - This method is EXPERIMENTAL, don't use on production systems.
    - "auto"
        - Rename or norename will be used depending on capabilities of the backend.
        - If meta format is set to "none", rename transactions will always be used.
        - This method is EXPERIMENTAL, don't use on production systems.

{{< rem autogenerated options stop >}}

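As a hedged sketch of the option above (the remote name `mychunker:` and paths are hypothetical), the transaction style can be stored in the config or overridden per run via the environment variable:

```
# Persist the setting in the config file
rclone config update mychunker transactions auto

# Or override it for a single invocation
RCLONE_CHUNKER_TRANSACTIONS=norename rclone copy /local/dir mychunker:backup
```
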
@@ -72,11 +72,13 @@ See the [global flags page](/flags/) for global options not listed here.

* [rclone rcd](/commands/rclone_rcd/) - Run rclone listening to remote control commands only.
* [rclone rmdir](/commands/rclone_rmdir/) - Remove the empty directory at path.
* [rclone rmdirs](/commands/rclone_rmdirs/) - Remove empty directories under the path.
* [rclone selfupdate](/commands/rclone_selfupdate/) - Update the rclone binary.
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.
* [rclone settier](/commands/rclone_settier/) - Changes storage class/tier of objects in remote.
* [rclone sha1sum](/commands/rclone_sha1sum/) - Produces an sha1sum file for all the objects in the path.
* [rclone size](/commands/rclone_size/) - Prints the total size and number of objects in remote:path.
* [rclone sync](/commands/rclone_sync/) - Make source and dest identical, modifying destination only.
* [rclone test](/commands/rclone_test/) - Run a test command
* [rclone touch](/commands/rclone_touch/) - Create new file or change file modification time.
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number.

@@ -15,15 +15,16 @@ Copy url content to dest.

Download a URL's content and copy it to the destination without saving
it in temporary storage.

Setting `--auto-filename` will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path. With `--print-filename` in addition, the resulting file name will
be printed.

Setting `--no-clobber` will prevent overwriting a file on the
destination if there is one with the same name.

Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output.

@@ -36,6 +37,7 @@ rclone copyurl https://example.com dest:path [flags]

      -a, --auto-filename    Get the file name from the URL and use it for destination file path
      -h, --help             help for copyurl
          --no-clobber       Prevent overwriting file with same name
      -p, --print-filename   Print the resulting name from --auto-filename
          --stdout           Write the output to stdout rather than a file

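A couple of hedged examples of the flags described above (the URL and remote names are placeholders):

```
# Derive the destination file name from the URL and print the name that was used
rclone copyurl -a -p https://example.com/files/report.pdf dest:reports

# Stream the body to stdout instead of writing a file
rclone copyurl --stdout https://example.com/robots.txt
```
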
@@ -17,8 +17,8 @@ By default `dedupe` interactively finds files with duplicate

names and offers to delete all but one or rename them to be
different. This is known as deduping by name.

Deduping by name is only useful with a small group of backends (e.g. Google Drive,
Opendrive) that can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file
names.

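For example (a hedged sketch, `drive:dupes` is a placeholder), deduping by name can also be run non-interactively by picking a policy with `--dedupe-mode`:

```
# Keep the newest copy of each duplicated name and remove the rest
rclone dedupe --dedupe-mode newest drive:dupes

# Interactive mode (the default) asks what to do for every duplicate
rclone dedupe drive:dupes
```
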
@@ -29,15 +29,15 @@ is an **empty** **existing** directory:

On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\parent\mount
    rclone mount remote:path/to/files \\cloud\remote

When the program ends while in foreground mode, either via Ctrl+C or receiving

@@ -91,14 +91,14 @@ and experience unexpected program errors, freezes or other issues, consider moun

as a network drive instead.

When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\parent\mount
    rclone mount remote:path/to/files X:

Option `--volname` can be used to set a custom volume name for the mounted

@@ -171,10 +171,24 @@ Note that the mapping of permissions is not always trivial, and the result

you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

If you set POSIX permissions for only allowing access to the owner, using
`--file-perms 0600 --dir-perms 0700`, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interpret this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying `-o FileSecurity="D:P(A;;FA;;;OW)"`, for file all access (FA) to the owner (OW).

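Putting the above together, a hedged example of a mount that keeps files private to the owner and avoids the "unprotected private key file" warning (the remote and drive letter are placeholders):

```
rclone mount remote:secrets X: --file-perms 0600 --dir-perms 0700 -o FileSecurity="D:P(A;;FA;;;OW)"
```
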
### Windows caveats

@@ -378,6 +392,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

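For instance, a hedged sketch of running two mounts safely by giving each instance its own cache root (paths and remote names are illustrative):

```
rclone mount remote:music  /mnt/music  --vfs-cache-mode writes --cache-dir ~/.cache/rclone-music &
rclone mount remote:photos /mnt/photos --vfs-cache-mode writes --cache-dir ~/.cache/rclone-photos &
```
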
### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -521,6 +542,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

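As a hedged example (the remote and mount point are placeholders), the flag makes `df` report the scanned total rather than whatever the backend returns:

```
rclone mount s3remote:bucket /mnt/bucket --vfs-used-is-size &
df -h /mnt/bucket   # "Used" now reflects a full recursive size scan
```
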
    rclone mount remote:path /path/to/mountpoint [flags]

@@ -565,6 +599,7 @@ rclone mount remote:path /path/to/mountpoint [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)
      --volname string                         Set the volume name. Supported on Windows and OSX only.

docs/content/commands/rclone_selfupdate.md (new file, 84 lines)
@@ -0,0 +1,84 @@

---
title: "rclone selfupdate"
description: "Update the rclone binary."
slug: rclone_selfupdate
url: /commands/rclone_selfupdate/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/selfupdate/ and as part of making a release run "make commanddocs"
---
# rclone selfupdate

Update the rclone binary.

## Synopsis

This command downloads the latest release of rclone and replaces
the currently running binary. The download is verified with a hashsum
and cryptographically signed signature.

If used without flags (or with implied `--stable` flag), this command
will install the latest stable release. However, some issues may be fixed
(or features added) only in the latest beta release. In such cases you should
run the command with the `--beta` flag, i.e. `rclone selfupdate --beta`.
You can check in advance what version would be installed by adding the
`--check` flag, then repeat the command without it when you are satisfied.

Sometimes the rclone team may recommend you a concrete beta or stable
rclone release to troubleshoot your issue or add a bleeding edge feature.
The `--version VER` flag, if given, will update to the concrete version
instead of the latest one. If you omit the micro version from `VER` (for
example `1.53`), the latest matching micro version will be used.

Upon successful update rclone will print a message that contains the previous
version number. You will need it if you later decide to revert your update
for some reason. Then you'll have to note the previous version and run the
following command: `rclone selfupdate [--beta] OLDVER`.
If the old version contains only dots and digits (for example `v1.54.0`)
then it's a stable release so you won't need the `--beta` flag. Beta releases
have additional information similar to `v1.54.0-beta.5111.06f1c0c61`.
(If you are a developer and use a locally built rclone, the version number
will end with `-DEV`; you will have to rebuild it as it obviously can't
be distributed.)

If you previously installed rclone via a package manager, the package may
include local documentation or configure services. You may wish to update
with the flag `--package deb` or `--package rpm` (whichever is correct for
your OS) to update these too. This command with the default `--package zip`
will update only the rclone executable so the local manual may become
inaccurate after it.

The `rclone mount` command (https://rclone.org/commands/rclone_mount/) may
or may not support extended FUSE options depending on the build and OS.
`selfupdate` will refuse to update if the capability would be discarded.

Note: Windows forbids deletion of a currently running executable so this
command will rename the old executable to 'rclone.old.exe' upon success.

Please note that this command was not available before rclone version 1.55.
If it fails for you with the message `unknown command "selfupdate"` then
you will need to update manually following the install instructions located
at https://rclone.org/install/

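A few hedged invocations of the workflows described above:

```
rclone selfupdate --check          # only report which version would be installed
rclone selfupdate --beta           # move to the latest beta
rclone selfupdate --version 1.53   # pin to the newest 1.53.x release
rclone selfupdate --package deb    # also update a deb-managed installation
```
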
```
rclone selfupdate [flags]
```

## Options

```
      --beta             Install beta release.
      --check            Check for latest release, do not download.
  -h, --help             help for selfupdate
      --output string    Save the downloaded binary at a given path (default: replace running binary)
      --package string   Package format: zip|deb|rpm (default: zip)
      --stable           Install stable release (this is the default)
      --version string   Install the given rclone version (default: latest)
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

@@ -134,6 +134,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -277,6 +284,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

    rclone serve dlna remote:path [flags]

@@ -309,6 +329,7 @@ rclone serve dlna remote:path [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)

@@ -133,6 +133,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -276,6 +283,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then

@@ -394,6 +414,7 @@ rclone serve ftp remote:path [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)

@@ -205,6 +205,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -348,6 +355,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

    rclone serve http remote:path [flags]

@@ -390,6 +410,7 @@ rclone serve http remote:path [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)

@@ -144,6 +144,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -287,6 +294,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then

@@ -404,6 +424,7 @@ rclone serve sftp remote:path [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)

@@ -213,6 +213,13 @@ for two reasons. Firstly because it is only checked every

`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.

### --vfs-cache-mode off

In this mode (the default) the cache will read directly from the remote and write

@@ -356,6 +363,19 @@ If the flag is not provided on the command line, then its default value depends

on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

## Alternate report of used bytes

Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.

_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then

@@ -482,6 +502,7 @@ rclone serve webdav remote:path [flags]

      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
      --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size.
      --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
      --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)

@@ -15,7 +15,8 @@ Make source and dest identical, modifying destination only.

Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary (except duplicate
objects, see below).

**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

@@ -23,7 +24,8 @@ source, including deleting files if necessary.

    rclone sync -i SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on
those providers that support it) are also not yet handled.

It is always the contents of the directory that is synced, not the
directory so when source:path is a directory, it's the contents of

@@ -35,6 +37,9 @@ go there.

**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics

**Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info.

    rclone sync source:path dest:path [flags]

docs/content/commands/rclone_test.md (new file, 41 lines)
@@ -0,0 +1,41 @@

---
title: "rclone test"
description: "Run a test command"
slug: rclone_test
url: /commands/rclone_test/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/ and as part of making a release run "make commanddocs"
---
# rclone test

Run a test command

## Synopsis

Rclone test is used to run test commands.

Select which test command you want with the subcommand, eg

    rclone test memory remote:

Each subcommand has its own options which you can see in their help.

**NB** Be careful running these commands, they may do strange things
so reading their documentation first is recommended.

## Options

```
  -h, --help   help for test
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone test histogram](/commands/rclone_test_histogram/) - Makes a histogram of file name characters.
* [rclone test info](/commands/rclone_test_info/) - Discovers file name or other limitations for paths.
* [rclone test makefiles](/commands/rclone_test_makefiles/) - Make a random file hierarchy in <dir>
* [rclone test memory](/commands/rclone_test_memory/) - Load all the objects at remote:path into memory and report memory stats.

docs/content/commands/rclone_test_histogram.md (new file, 36 lines)
@@ -0,0 +1,36 @@

---
title: "rclone test histogram"
description: "Makes a histogram of file name characters."
slug: rclone_test_histogram
url: /commands/rclone_test_histogram/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/histogram/ and as part of making a release run "make commanddocs"
---
# rclone test histogram

Makes a histogram of file name characters.

## Synopsis

This command outputs JSON which shows the histogram of characters used
in filenames in the remote:path specified.

The data doesn't contain any identifying information but is useful for
the rclone developers when developing filename compression.

```
rclone test histogram [remote:path] [flags]
```

## Options

```
  -h, --help   help for histogram
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone test](/commands/rclone_test/) - Run a test command

docs/content/commands/rclone_test_info.md (new file, 44 lines)
@@ -0,0 +1,44 @@

---
title: "rclone test info"
description: "Discovers file name or other limitations for paths."
slug: rclone_test_info
url: /commands/rclone_test_info/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/info/ and as part of making a release run "make commanddocs"
---
# rclone test info

Discovers file name or other limitations for paths.

## Synopsis

rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.

**NB** this can create undeletable files and other hazards - use with care

```
rclone test info [remote:path]+ [flags]
```

## Options

```
      --all                    Run all tests.
      --check-control          Check control characters.
      --check-length           Check max filename length.
      --check-normalization    Check UTF-8 Normalization.
      --check-streaming        Check uploads with indeterminate file size.
  -h, --help                   help for info
      --upload-wait duration   Wait after writing a file.
      --write-json string      Write results to file.
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone test](/commands/rclone_test/) - Run a test command

docs/content/commands/rclone_test_makefiles.md (new file, 33 lines)
@@ -0,0 +1,33 @@

---
title: "rclone test makefiles"
description: "Make a random file hierarchy in <dir>"
slug: rclone_test_makefiles
url: /commands/rclone_test_makefiles/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/makefiles/ and as part of making a release run "make commanddocs"
---
# rclone test makefiles

Make a random file hierarchy in <dir>

```
rclone test makefiles <dir> [flags]
```

## Options

```
      --files int                  Number of files to create (default 1000)
      --files-per-directory int    Average number of files per directory (default 10)
  -h, --help                       help for makefiles
      --max-file-size SizeSuffix   Maximum size of files to create (default 100)
      --max-name-length int        Maximum size of file names (default 12)
      --min-file-size SizeSuffix   Minimum size of file to create
      --min-name-length int        Minimum size of file names (default 4)
```

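For example (a hedged sketch, the target directory is arbitrary), a small test tree can be generated with the flags listed above:

```
# roughly 200 files, at most 1M each, under /tmp/rclone-testtree
rclone test makefiles --files 200 --max-file-size 1M /tmp/rclone-testtree
```
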
See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone test](/commands/rclone_test/) - Run a test command

docs/content/commands/rclone_test_memory.md (new file, 27 lines)
@@ -0,0 +1,27 @@

---
title: "rclone test memory"
description: "Load all the objects at remote:path into memory and report memory stats."
slug: rclone_test_memory
url: /commands/rclone_test_memory/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/memory/ and as part of making a release run "make commanddocs"
---
# rclone test memory

Load all the objects at remote:path into memory and report memory stats.

```
rclone test memory remote:path [flags]
```

## Options

```
  -h, --help   help for memory
```

See the [global flags page](/flags/) for global options not listed here.

## SEE ALSO

* [rclone test](/commands/rclone_test/) - Run a test command

@@ -12,14 +12,21 @@ Show the version number.

## Synopsis

Show the rclone version number, the go version, the build target OS and
architecture, build tags and the type of executable (static or dynamic).

For example:

    $ rclone version
    rclone v1.54
    - os/type: linux
    - os/arch: amd64
    - go/version: go1.16
    - go/linking: static
    - go/tags: none

Note: before rclone version 1.55 the os/type and os/arch lines were merged,
and the "go/version" line was tagged as "go version".

If you supply the --check flag, then it will do an online check to
compare your version with the latest release and the latest beta.
@@ -517,6 +517,20 @@ names, or for debugging purposes.

- Type: bool
- Default: false

#### --crypt-no-data-encryption

Option to either encrypt file data or leave it unencrypted.

- Config: no_data_encryption
- Env Var: RCLONE_CRYPT_NO_DATA_ENCRYPTION
- Type: bool
- Default: false
- Examples:
    - "true"
        - Don't encrypt file data, leave it unencrypted.
    - "false"
        - Encrypt file data.
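A minimal sketch of this option in a config file, where the remote and password values are placeholders (the password must be in obscured form, e.g. produced by `rclone obscure` or written for you by `rclone config`):

    [names-only-crypt]
    type = crypt
    remote = myremote:encrypted
    filename_encryption = standard
    no_data_encryption = true
    password = <obscured password>

This leaves file contents readable on the remote while still encrypting the file and directory names.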
### Backend commands

Here are the commands specific to the crypt backend.
@@ -8,8 +8,8 @@ Configure

First, you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file.
(See the [`--config`](#config-config-file) entry for how to find the config
file and choose its location.)

The easiest way to make the config is to run rclone with the config
option:
@@ -208,6 +208,108 @@ To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`

To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.

### Connection strings {#connection-strings}

The above examples can also be written using a connection string
syntax, so instead of providing the arguments as command line
parameters `--http-url https://pub.rclone.org` they are provided as
part of the remote specification as a kind of connection string.

    rclone lsd ":http,url='https://pub.rclone.org':"
    rclone lsf ":http,url='https://example.com':path/to/dir"
    rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir
    rclone copy :sftp,host=example.com:path/to/dir /tmp/dir

These can be used to modify existing remotes as well as to create new
remotes using the on the fly syntax. This example is equivalent to
adding the `--drive-shared-with-me` parameter to the remote `gdrive:`.

    rclone lsf "gdrive,shared_with_me:path/to/dir"

The major advantage to using the connection string style syntax is
that it only applies to the remote, not to all the remotes of that
type on the command line. A common confusion is this attempt to copy a
file shared on google drive to the normal drive which **does not
work** because the `--drive-shared-with-me` flag applies to both the
source and the destination.

    rclone copy --drive-shared-with-me gdrive:shared-file.txt gdrive:

However using the connection string syntax, this does work.

    rclone copy "gdrive,shared_with_me:shared-file.txt" gdrive:

The connection strings have the following syntax

    remote,parameter=value,parameter2=value2:path/to/dir
    :backend,parameter=value,parameter2=value2:path/to/dir

If the `parameter` has a `:` or `,` then it must be placed in quotes `"` or
`'`, so

    remote,parameter="colon:value",parameter2="comma,value":path/to/dir
    :backend,parameter='colon:value',parameter2='comma,value':path/to/dir

If a quoted value needs to include that quote, then it should be
doubled, so

    remote,parameter="with""quote",parameter2='with''quote':path/to/dir

This will make `parameter` be `with"quote` and `parameter2` be
`with'quote`.

If you leave off the `=parameter` then rclone will substitute `=true`
which works very well with flags. For example to use s3 configured in
the environment you could use:

    rclone lsd :s3,env_auth:

Which is equivalent to

    rclone lsd :s3,env_auth=true:

Note that on the command line you might need to surround these
connection strings with `"` or `'` to stop the shell interpreting any
special characters within them.

If you are a shell master then you'll know which strings are OK and
which aren't, but if you aren't sure then enclose them in `"` and use
`'` as the inside quote. This syntax works on all OSes.

    rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir

On Linux/macOS some characters are still interpreted inside `"`
strings in the shell (notably `\` and `$` and `"`) so if your strings
contain those you can swap the roles of `"` and `'` thus. (This syntax
does not work on Windows.)

    rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir

#### Connection strings, config and logging

If you supply extra configuration to a backend by command line flag,
environment variable or connection string then rclone will add a
suffix based on the hash of the config to the name of the remote, eg

    rclone -vv lsf --s3-chunk-size 20M s3:

Has the log message

    DEBUG : s3: detected overridden config - adding "{Srj1p}" suffix to name

This is so rclone can tell the modified remote apart from the
unmodified remote when caching the backends.

This should only be noticeable in the logs.

This means that on the fly backends such as

    rclone -vv lsf :s3,env_auth:

Will get their own names

    DEBUG : :s3: detected overridden config - adding "{YTu53}" suffix to name

### Valid remote names

- Remote names may only contain 0-9, A-Z, a-z, _, - and space.
@@ -537,7 +639,7 @@ See `--copy-dest` and `--backup-dir`.

### --config=CONFIG_FILE ###

Specify the location of the rclone configuration file.

Normally the config file is in your home directory as a file called
`.config/rclone/rclone.conf` (or `.rclone.conf` if created with an
@@ -552,10 +654,46 @@ If you run `rclone config file` you will see where the default

location is for you.

Use this flag to override the config location, e.g. `rclone
--config=".myconfig" config`.

If the location is set to empty string `""` or the special value
`/notfound`, or the os null device represented by value `NUL` on
Windows and `/dev/null` on Unix systems, then rclone will keep the
config file in memory only.

The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
Sections of text, led by a `[section]` header and followed by
`key=value` entries on separate lines. In rclone each remote is
represented by its own section, where the section name defines the
name of the remote. Options are specified as the `key=value` entries,
where the key is the option name without the `--backend-` prefix,
in lowercase and with `_` instead of `-`. E.g. option `--mega-hard-delete`
corresponds to key `hard_delete`. Only backend options can be specified.
A special, and required, key `type` identifies the [storage system](/overview/),
where the value is the internal lowercase name as returned by command
`rclone help backends`. Comments are indicated by `;` or `#` at the
beginning of a line.

Example:

    [megaremote]
    type = mega
    user = you@example.com
    pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH

Note that passwords are in [obscured](/commands/rclone_obscure/)
form. Also, many storage systems use token-based authentication instead
of passwords, and this requires additional steps. It is easier, and safer,
to use the interactive command `rclone config` instead of manually
editing the configuration file.

The configuration file will typically contain login information, and
should therefore have restricted permissions so that only the current user
can read it. Rclone tries to ensure this when it writes the file.
You may also choose to [encrypt](#configuration-encryption) the file.

When token-based authentication is used, the configuration file
must be writable, because rclone needs to update the tokens inside it.
@@ -649,6 +787,27 @@ triggering follow-on actions if data was copied, or skipping if not.

NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!

### --fs-cache-expire-duration=TIME

When using rclone via the API rclone caches created remotes for 5
minutes by default in the "fs cache". This means that if you do
repeated actions on the same remote then rclone won't have to build it
again from scratch, which makes it more efficient.

This flag sets the time that the remotes are cached for. If you set it
to `0` (or negative) then rclone won't cache the remotes at all.

Note that if you use some flags, e.g. `--backup-dir`, and if this is set
to `0`, rclone may build two remotes (one for the source or destination
and one for the `--backup-dir`) where it may have only built one
before.

### --fs-cache-expire-interval=TIME

This controls how often rclone checks for cached remotes to expire.
See the `--fs-cache-expire-duration` documentation above for more
info. The default is 60s, set to 0 to disable expiry.
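As an illustrative sketch (assuming the remote control daemon is in use), the cache behaviour can be tuned or switched off when starting the daemon:

    # keep remotes built via the API for 30 minutes, checking every 5 minutes
    rclone rcd --fs-cache-expire-duration 30m --fs-cache-expire-interval 5m

    # disable the fs cache entirely
    rclone rcd --fs-cache-expire-duration 0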
### --header ###

Add an HTTP header for all transactions. The flag can be repeated to
@@ -1620,7 +1779,7 @@ Configuration Encryption
------------------------

Your configuration file contains information for logging in to
your cloud services. This means that you should keep your
`rclone.conf` file in a secure location.

If you are in an environment where that isn't possible, you can
add a password to your configuration. This means that you will
@@ -1726,6 +1885,16 @@ password prompts. To do that, pass the parameter

of asking for a password if `RCLONE_CONFIG_PASS` doesn't contain
a valid password, and `--password-command` has not been supplied.

Some rclone commands, such as `genautocomplete`, do not require configuration.
Nevertheless, rclone will read any configuration file found
according to the rules described [above](https://rclone.org/docs/#config-config-file).
If an encrypted configuration file is found, this means you will be prompted for
a password (unless using `--password-command`). To avoid this, you can bypass
the loading of the configuration file by overriding the location with an empty
string `""` or the special value `/notfound`, or the os null device represented
by value `NUL` on Windows and `/dev/null` on Unix systems (before rclone
version 1.55 only this null device alternative was supported).
E.g. `rclone --config="" genautocomplete bash`.

Developer options
-----------------
@@ -1927,11 +2096,8 @@ so they take exactly the same form.

### Config file ###

You can set defaults for values in the config file on an individual
remote basis. The names of the config items are documented in the page
for each backend.

To find the name of the environment variable that you need to set, take
`RCLONE_CONFIG_` + name of remote + `_` + name of config file option

@@ -1953,6 +2119,11 @@ mys3:

Note that if you want to create a remote using environment variables
you must create the `..._TYPE` variable as above.

Note also that now rclone has [connection strings](#connection-strings),
it is probably easier to use those instead, which makes the above example

    rclone lsd :s3,access_key_id=XXX,secret_access_key=XXX:

### Precedence

The various different methods of backend configuration are read in
@@ -197,6 +197,21 @@ memory. It can be set smaller if you are tight on memory.

Impersonate this user when using a business account.

Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to look up a member's email address into the internal ID that
dropbox uses in the API.

Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.

You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.

- Config: impersonate
- Env Var: RCLONE_DROPBOX_IMPERSONATE
- Type: string
|||||||
many files involved in this operation`. As a work-around do an
|
many files involved in this operation`. As a work-around do an
|
||||||
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
|
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
|
||||||
|
|
||||||
|
When using `rclone link` you'll need to set `--expire` if using a
|
||||||
|
non-personal account otherwise the visibility may not be correct.
|
||||||
|
(Note that `--expire` isn't supported on personal accounts). See the
|
||||||
|
[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
|
||||||
|
[dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).
|
||||||
|
|
||||||
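For example (a sketch only - the remote and path are placeholders), a link on a business account could be created with an explicit expiry:

    rclone link --expire 1d business-dropbox:shared/report.pdf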
### Get your own Dropbox App ID ###

When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.
@@ -236,6 +236,28 @@ Option `exclude-if-present` creates a directory exclude rule based

on the presence of a file in a directory and takes precedence over
other rclone directory filter rules.

When using pattern list syntax, if a pattern item contains either
`/` or `**`, then rclone will not be able to imply a directory filter rule
from this pattern list.

E.g. for an include rule

    {dir1/**,dir2/**}

Rclone will match files below directories `dir1` or `dir2` only,
but will not be able to use this filter to exclude a directory `dir3`
from being traversed.

Directory recursion optimisation may affect performance, but normally
not the result. One exception to this is sync operations with option
`--create-empty-src-dirs`, where any traversed empty directories will
be created. With the pattern list example `{dir1/**,dir2/**}` above,
this would create an empty directory `dir3` on destination (when it exists
on source). Changing the filter to `{dir1,dir2}/**`, or splitting it into
two include rules `--include dir1/** --include dir2/**`, will match the
same files while also filtering directories, with the result that an empty
directory `dir3` will no longer be created.
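A sketch of the difference (the source and destination remotes are placeholders):

    # pattern list: matches files below dir1 and dir2, but dir3 is still
    # traversed and, with --create-empty-src-dirs, created empty on the destination
    rclone sync --create-empty-src-dirs --include "{dir1/**,dir2/**}" src:bucket dst:bucket

    # split include rules: same files matched, but directory filters are implied
    # so dir3 is neither traversed nor created
    rclone sync --create-empty-src-dirs --include "dir1/**" --include "dir2/**" src:bucket dst:bucket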
### `--exclude` - Exclude files matching pattern

Excludes path/file names from an rclone command based on a single exclude
@@ -396,7 +418,7 @@ processed in.

Arrange the order of filter rules with the most restrictive first and
work down.

E.g. for `filter-file.txt`:

    # a sample filter rule file
    - secret*.jpg
@@ -27,10 +27,10 @@ These flags are available for every command.

-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--client-cert string Client SSL certificate (PEM) for mutual TLS auth
--client-key string Client SSL private key (PEM) for mutual TLS auth
--compare-dest stringArray Include additional comma separated server-side paths during comparison.
--config string Config file. (default "$HOME/.config/rclone/rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination.
--cpuprofile string Write cpu profile to file
--cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
--delete-after When synchronizing, delete files on destination after transferring (default)

@@ -39,10 +39,10 @@ These flags are available for every command.

--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
-n, --dry-run Do a trial run with no permanent changes
--dscp string Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file (use - to read from stdin)

@@ -53,6 +53,8 @@ These flags are available for every command.

--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file (use - to read from stdin)
--fs-cache-expire-duration duration cache remotes for this long (0 to disable caching) (default 5m0s)
--fs-cache-expire-interval duration interval to check for expired remotes (default 1m0s)
--header stringArray Set HTTP header for all transactions
--header-download stringArray Set HTTP header for download transactions
--header-upload stringArray Set HTTP header for upload transactions

@@ -151,7 +153,7 @@ These flags are available for every command.

--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.55.0")
-v, --verbose count Print lots more stuff (repeat for more)
```

@@ -184,6 +186,7 @@ and may be set in the config file.

--azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
--azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.
--azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
--azureblob-public-access string Public access level of a container: blob, container.
--azureblob-sas-url string SAS URL for container level access only
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal.
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256MB). (Deprecated)

@@ -247,6 +250,7 @@ and may be set in the config file.

-L, --copy-links Follow symlinks and copy the pointed to item.
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted.
--crypt-password string Password or pass phrase for encryption. (obscured)
--crypt-password2 string Password or pass phrase for salt. Optional but recommended. (obscured)
--crypt-remote string Remote to encrypt/decrypt.

@@ -282,7 +286,7 @@ and may be set in the config file.

--drive-starred-only Only show files that are starred.
--drive-stop-on-download-limit Make download limit errors be fatal
--drive-stop-on-upload-limit Make upload limit errors be fatal
--drive-team-drive string ID of the Shared Drive (Team Drive)
--drive-token string OAuth Access Token as a JSON blob.
--drive-token-url string Token server url.
--drive-trashed-only Only show files that are in the trash.

@@ -311,12 +315,14 @@ and may be set in the config file.

--filefabric-token-expiry string Token expiry time
--filefabric-url string URL of the Enterprise File Fabric to connect to
--filefabric-version string Version read from the file fabric
--ftp-close-timeout Duration Maximum time to wait for a response to close. (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
--ftp-host string FTP host to connect to
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--ftp-no-check-certificate Do not verify the TLS certificate of the server
--ftp-pass string FTP password (obscured)
--ftp-port string FTP port, leave blank to use default (21)

@@ -378,6 +384,7 @@ and may be set in the config file.

--local-case-sensitive Force the filesystem to report itself as case sensitive.
--local-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)

@@ -408,6 +415,7 @@ and may be set in the config file.

--onedrive-link-password string Set the password for links created by the link command.
--onedrive-link-scope string Set the scope of the links created by the link command. (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command. (default "view")
--onedrive-list-chunk int Size of listing chunk. (default 1000)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive. (default "global")
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs.

@@ -482,8 +490,10 @@ and may be set in the config file.

--seafile-url string URL of seafile host to connect to
--seafile-user string User name (usually email address)
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file. (obscured)
--sftp-key-pem string Raw PEM-encoded private key, If specified, will override key_file parameter.

@@ -553,9 +563,10 @@ and may be set in the config file.

--union-upstreams string List of space separated upstreams.
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
--webdav-encoding string This sets the encoding for the backend.
--webdav-pass string Password. (obscured)
--webdav-url string URL of http host to connect to
--webdav-user string User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-auth-url string Auth server URL.
--yandex-client-id string OAuth Client Id

@@ -563,6 +574,11 @@ and may be set in the config file.

--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-token string OAuth Access Token as a JSON blob.
--yandex-token-url string Token server url.
--zoho-auth-url string Auth server URL.
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to. You'll have to use the region your organization is registered in.
--zoho-token string OAuth Access Token as a JSON blob.
--zoho-token-url string Token server url.
```
@@ -223,6 +223,30 @@ Disable using MLSD even if server advertises support

- Type: bool
- Default: false

#### --ftp-idle-timeout

Max time before closing idle connections

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.

- Config: idle_timeout
- Env Var: RCLONE_FTP_IDLE_TIMEOUT
- Type: Duration
- Default: 1m0s

#### --ftp-close-timeout

Maximum time to wait for a response to close.

- Config: close_timeout
- Env Var: RCLONE_FTP_CLOSE_TIMEOUT
- Type: Duration
- Default: 1m0s
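For illustration (the remote name and path are placeholders), both timeouts can be adjusted per command:

    rclone lsf --ftp-idle-timeout 30s --ftp-close-timeout 2m myftp:path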
#### --ftp-encoding

This sets the encoding for the backend.
@@ -25,13 +25,14 @@ fi

#create tmp directory and move to it with macOS compatibility fallback
tmp_dir=$(mktemp -d 2>/dev/null || mktemp -d -t 'rclone-install.XXXXXXXXXX')
cd "$tmp_dir"


#make sure unzip tool is available and choose one to work with
set +e
for tool in ${unzip_tools_list[*]}; do
    trash=$(hash "$tool" 2>>errors)
    if [ "$?" -eq 0 ]; then
        unzip_tool="$tool"
        break

@@ -40,7 +41,7 @@ done

set -e

# exit if no unzip tools available
if [ -z "$unzip_tool" ]; then
    printf "\nNone of the supported tools for extracting zip archives (${unzip_tools_list[*]}) were found. "
    printf "Please install one of them and try again.\n\n"
    exit 4

@@ -50,11 +51,11 @@ fi

export XDG_CONFIG_HOME=config

#check installed version of rclone to determine if update is necessary
version=$(rclone --version 2>>errors | head -n 1)
if [ -z "$install_beta" ]; then
    current_version=$(curl -f https://downloads.rclone.org/version.txt)
else
    current_version=$(curl -f https://beta.rclone.org/version.txt)
fi

if [ "$version" = "$current_version" ]; then

@@ -63,9 +64,8 @@ if [ "$version" = "$current_version" ]; then

fi


#detect the platform
OS="$(uname)"
case $OS in
  Linux)
    OS='linux'

@@ -93,20 +93,20 @@ case $OS in

    ;;
esac

OS_type="$(uname -m)"
case "$OS_type" in
  x86_64|amd64)
    OS_type='amd64'
    ;;
  i?86|x86)
    OS_type='386'
    ;;
  aarch64|arm64)
    OS_type='arm64'
    ;;
  arm*)
    OS_type='arm'
    ;;
  *)
    echo 'OS type not supported'
    exit 2

@@ -115,43 +115,42 @@ esac


#download and unzip
if [ -z "$install_beta" ]; then
    download_link="https://downloads.rclone.org/rclone-current-${OS}-${OS_type}.zip"
    rclone_zip="rclone-current-${OS}-${OS_type}.zip"
else
    download_link="https://beta.rclone.org/rclone-beta-latest-${OS}-${OS_type}.zip"
    rclone_zip="rclone-beta-latest-${OS}-${OS_type}.zip"
fi

curl -Of "$download_link"
unzip_dir="tmp_unzip_dir_for_rclone"
# there should be an entry in this switch for each element of unzip_tools_list
case "$unzip_tool" in
  'unzip')
    unzip -a "$rclone_zip" -d "$unzip_dir"
    ;;
  '7z')
    7z x "$rclone_zip" "-o$unzip_dir"
    ;;
  'busybox')
    mkdir -p "$unzip_dir"
    busybox unzip "$rclone_zip" -d "$unzip_dir"
    ;;
esac

cd $unzip_dir/*


#mounting rclone to environment

case "$OS" in
  'linux')
    #binary
    cp rclone /usr/bin/rclone.new
    chmod 755 /usr/bin/rclone.new
    chown root:root /usr/bin/rclone.new
    mv /usr/bin/rclone.new /usr/bin/rclone
    #manual
    if ! [ -x "$(command -v mandb)" ]; then
        echo 'mandb not found. The rclone man docs will not be installed.'
    else

@@ -161,11 +160,11 @@ case $OS in

    fi
    ;;
  'freebsd'|'openbsd'|'netbsd')
    #binary
    cp rclone /usr/bin/rclone.new
    chown root:wheel /usr/bin/rclone.new
    mv /usr/bin/rclone.new /usr/bin/rclone
    #manual
    mkdir -p /usr/local/man/man1
    cp rclone.1 /usr/local/man/man1/
    makewhatis

@@ -186,7 +185,7 @@ esac


#update version variable post install
version=$(rclone --version 2>>errors | head -n 1)

printf "\n${version} has successfully installed."
printf '\nNow run "rclone config" for setup. Check https://rclone.org/docs/ for more details.\n\n'
@@ -320,9 +320,9 @@ filesystem.

where it isn't supported (e.g. Windows) it will be ignored.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/local/local.go then run make backenddocs" >}}
### Advanced Options

Here are the advanced options specific to local (Local Disk).

#### --local-nounc

@@ -336,10 +336,6 @@ Disable UNC (long path names) conversion on Windows

- "true"
    - Disables long file names

#### --copy-links / -L

Follow symlinks and copy the pointed to item.
@@ -325,6 +325,15 @@ fall back to normal copy (which will be slightly slower).

- Type: bool
- Default: false

#### --onedrive-list-chunk

Size of listing chunk.

- Config: list_chunk
- Env Var: RCLONE_ONEDRIVE_LIST_CHUNK
- Type: int
- Default: 1000

#### --onedrive-no-versions

Remove all versions on modifying operations
@@ -378,6 +378,55 @@ call and taken by the [options/set](#options-set) calls as well as the

- `BandwidthSpec` - this will be set and returned as a string, eg
  "1M".

## Specifying remotes to work on

Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
parameters depending on the command being used.

The parameters can be a string as per the rest of rclone, eg
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
JSON blobs.

If specifying a JSON blob it should be an object mapping strings to
strings. These values will be used to configure the remote. There are
3 special values which may be set:

- `type` - set to `type` to specify a remote called `:type:`
- `_name` - set to `name` to specify a remote called `name:`
- `_root` - sets the root of the remote - may be empty

One of `_name` or `type` should normally be set. If the `local`
backend is desired then `type` should be set to `local`. If `_root`
isn't specified then it defaults to the root of the remote.

For example this JSON is equivalent to `remote:/tmp`

```
{
    "_name": "remote",
    "_path": "/tmp"
}
```

And this is equivalent to `:sftp,host='example.com':/tmp`

```
{
    "type": "sftp",
    "host": "example.com",
    "_path": "/tmp"
}
```

And this is equivalent to `/tmp/dir`

```
{
    "type": "local",
    "_path": "/tmp/dir"
}
```
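A hedged sketch of the JSON form in practice - this assumes a running rc server (e.g. started with `rclone rcd`) and uses the `operations/list` call, whose `fs` and `remote` parameters are documented below:

    rclone rc operations/list --json '{
        "fs": {"type": "sftp", "host": "example.com", "_path": "/home/user"},
        "remote": ""
    }'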
## Supported commands
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
### backend/command: Runs a backend command. {#backend-command}
@@ -716,18 +765,22 @@ Returns the following values:

```
{
    "bytes": total transferred bytes since the start of the group,
    "checks": number of files checked,
    "deletes" : number of files deleted,
    "elapsedTime": time in floating point seconds since rclone was started,
    "errors": number of errors,
    "eta": estimated time in seconds until the group completes,
    "fatalError": boolean whether there has been at least one fatal error,
    "lastError": last error string,
    "renames" : number of files renamed,
    "retryError": boolean showing whether there has been at least one non-NoRetryError,
    "speed": average speed in bytes/sec since start of the group,
    "totalBytes": total number of bytes in the group,
    "totalChecks": total number of checks in the group,
    "totalTransfers": total number of transfers in the group,
    "transferTime" : total time spent on running jobs,
    "transfers": number of transferred files,
    "transferring": an array of currently active file transfers:
        [
            {
@@ -808,6 +861,8 @@ This shows the current version of go and the go runtime

- os - OS in use as according to Go
- arch - cpu architecture in use according to Go
- goVersion - version of Go runtime in use
- linking - type of rclone executable (static or dynamic)
- goTags - space separated build tags or "none"

### debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling. {#debug-set-block-profile-rate}
@@ -847,6 +902,26 @@ Results

- previousRate - int

### fscache/clear: Clear the Fs cache. {#fscache-clear}

This clears the fs cache. This is where remotes created from backends
are cached for a short while to make repeated rc calls more efficient.

If you change the parameters of a backend then you may want to call
this to clear an existing remote out of the cache before re-creating
it.

**Authentication is required for this call.**

### fscache/entries: Returns the number of entries in the fs cache. {#fscache-entries}

This returns the number of entries in the fs cache.

Returns
- entries - number of items in the cache

**Authentication is required for this call.**
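As a sketch (assuming an rc server is running with authentication configured - the user and password here are placeholders):

    rclone rc --user admin --pass secret fscache/entries
    rclone rc --user admin --pass secret fscache/clear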
### job/list: Lists the IDs of the running jobs {#job-list}

Parameters - None
@@ -1207,6 +1282,7 @@ This takes the following parameters

- fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir"
- each part in body represents a file to be uploaded
See the [uploadfile command](/commands/rclone_uploadfile/) for more information on the above.

**Authentication is required for this call.**
@@ -1215,11 +1291,31 @@ This takes the following parameters
 Returns
 - options - a list of the options block names

-### options/get: Get all the options {#options-get}
+### options/get: Get all the global options {#options-get}

 Returns an object where keys are option block names and values are an
 object with the current option values in.

+Note that these are the global options which are unaffected by use of
+the _config and _filter parameters. If you wish to read the parameters
+set in _config then use options/config and for _filter use options/filter.
+
+This shows the internal names of the option within rclone which should
+map to the external options very easily with a few exceptions.
+
+### options/local: Get the currently active config for this call {#options-local}
+
+Returns an object with the keys "config" and "filter".
+The "config" key contains the local config and the "filter" key contains
+the local filters.
+
+Note that these are the local options specific to this rc call. If
+_config was not supplied then they will be the global options.
+Likewise with "_filter".
+
+This call is mostly useful for seeing if _config and _filter passing
+is working.
+
 This shows the internal names of the option within rclone which should
 map to the external options very easily with a few exceptions.

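A small sketch of the "is _config passing working?" check described above. It assumes an rc server on the default `localhost:5572` with auth disabled and simply echoes back what options/local reports for the supplied `_config` override (DryRun is a real global option name; the rest is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Send a per-call config override; options/local should echo it back
	// under the "config" key if _config passing is working.
	body := bytes.NewBufferString(`{"_config": {"DryRun": true}}`)
	resp, err := http.Post("http://localhost:5572/options/local", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	reply, _ := io.ReadAll(resp.Body)
	fmt.Println(string(reply))
}
```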
@@ -1372,6 +1468,7 @@ This takes the following parameters

 - srcFs - a remote name string e.g. "drive:src" for the source
 - dstFs - a remote name string e.g. "drive:dst" for the destination
+- createEmptySrcDirs - create empty src directories on destination if set


 See the [copy command](/commands/rclone_copy/) command for more information on the above.
@@ -1384,6 +1481,7 @@ This takes the following parameters

 - srcFs - a remote name string e.g. "drive:src" for the source
 - dstFs - a remote name string e.g. "drive:dst" for the destination
+- createEmptySrcDirs - create empty src directories on destination if set
 - deleteEmptySrcDirs - delete empty src directories if set


@@ -1397,6 +1495,7 @@ This takes the following parameters

 - srcFs - a remote name string e.g. "drive:src" for the source
 - dstFs - a remote name string e.g. "drive:dst" for the destination
+- createEmptySrcDirs - create empty src directories on destination if set


 See the [sync command](/commands/rclone_sync/) command for more information on the above.
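To show how the new createEmptySrcDirs parameter plugs into these calls, here is a minimal sketch that starts a sync/copy job over the rc API. The remote names and the running rc server (default `localhost:5572`, auth disabled) are assumptions:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Parameters documented above: srcFs, dstFs and the new createEmptySrcDirs flag.
	params := map[string]interface{}{
		"srcFs":              "drive:src",
		"dstFs":              "drive:dst",
		"createEmptySrcDirs": true,
		"_async":             true, // run as a background job and return a jobid
	}
	body, _ := json.Marshal(params)
	resp, err := http.Post("http://localhost:5572/sync/copy", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var reply map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		panic(err)
	}
	fmt.Println("reply:", reply)
}
```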
@@ -496,6 +496,44 @@ any given time.
 - Type: bool
 - Default: false

+#### --sftp-disable-concurrent-reads
+
+If set don't use concurrent reads
+
+Normally concurrent reads are safe to use and not using them will
+degrade performance, so this option is disabled by default.
+
+Some servers limit the amount number of times a file can be
+downloaded. Using concurrent reads can trigger this limit, so if you
+have a server which returns
+
+    Failed to copy: file does not exist
+
+Then you may need to enable this flag.
+
+If concurrent reads are disabled, the use_fstat option is ignored.
+
+
+- Config: disable_concurrent_reads
+- Env Var: RCLONE_SFTP_DISABLE_CONCURRENT_READS
+- Type: bool
+- Default: false
+
+#### --sftp-idle-timeout
+
+Max time before closing idle connections
+
+If no connections have been returned to the connection pool in the time
+given, rclone will empty the connection pool.
+
+Set to 0 to keep connections indefinitely.
+
+
+- Config: idle_timeout
+- Env Var: RCLONE_SFTP_IDLE_TIMEOUT
+- Type: Duration
+- Default: 1m0s
+
 {{< rem autogenerated options stop >}}

 ### Limitations ###
@@ -137,23 +137,21 @@ Name of the Webdav site/service/software you are using
     - "owncloud"
         - Owncloud
     - "sharepoint"
-        - Sharepoint
+        - Sharepoint Online, authenticated by Microsoft account.
     - "sharepoint-ntlm"
-        - Sharepoint with NTLM authentication
+        - Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
     - "other"
         - Other site/service or software

 #### --webdav-user

-User name
+User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.

 - Config: user
 - Env Var: RCLONE_WEBDAV_USER
 - Type: string
 - Default: ""

-In case vendor mode `sharepoint-ntlm` is used, the user name is in the form `DOMAIN\user`
-
 #### --webdav-pass

 Password.

@@ -187,6 +185,19 @@ Command to run to get a bearer token
 - Type: string
 - Default: ""

+#### --webdav-encoding
+
+This sets the encoding for the backend.
+
+See: the [encoding section in the overview](/overview/#encoding) for more info.
+
+Default encoding is Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8 for sharepoint-ntlm or identity otherwise.
+
+- Config: encoding
+- Env Var: RCLONE_WEBDAV_ENCODING
+- Type: string
+- Default: ""
+
 {{< rem autogenerated options stop >}}

 ## Provider notes ##
@@ -128,6 +128,26 @@ from filenames during upload.

 Here are the standard options specific to zoho (Zoho).

+#### --zoho-client-id
+
+OAuth Client Id
+Leave blank normally.
+
+- Config: client_id
+- Env Var: RCLONE_ZOHO_CLIENT_ID
+- Type: string
+- Default: ""
+
+#### --zoho-client-secret
+
+OAuth Client Secret
+Leave blank normally.
+
+- Config: client_secret
+- Env Var: RCLONE_ZOHO_CLIENT_SECRET
+- Type: string
+- Default: ""
+
 #### --zoho-region

 Zoho region to connect to. You'll have to use the region you organization is registered in.

@@ -150,6 +170,35 @@ Zoho region to connect to. You'll have to use the region you organization is reg

 Here are the advanced options specific to zoho (Zoho).

+#### --zoho-token
+
+OAuth Access Token as a JSON blob.
+
+- Config: token
+- Env Var: RCLONE_ZOHO_TOKEN
+- Type: string
+- Default: ""
+
+#### --zoho-auth-url
+
+Auth server URL.
+Leave blank to use the provider defaults.
+
+- Config: auth_url
+- Env Var: RCLONE_ZOHO_AUTH_URL
+- Type: string
+- Default: ""
+
+#### --zoho-token-url
+
+Token server url.
+Leave blank to use the provider defaults.
+
+- Config: token_url
+- Env Var: RCLONE_ZOHO_TOKEN_URL
+- Type: string
+- Default: ""
+
 #### --zoho-encoding

 This sets the encoding for the backend.
@@ -1 +1 @@
-v1.55.0
+v1.56.0
@@ -214,7 +214,10 @@ func (acc *Account) averageLoop() {
         acc.values.mu.Lock()
         // Add average of last second.
         elapsed := now.Sub(acc.values.lpTime).Seconds()
-        avg := float64(acc.values.lpBytes) / elapsed
+        avg := 0.0
+        if elapsed > 0 {
+            avg = float64(acc.values.lpBytes) / elapsed
+        }
         // Soft start the moving average
         if period < averagePeriod {
             period++
@@ -442,7 +445,11 @@ func (acc *Account) speed() (bps, current float64) {
     }
     // Calculate speed from first read.
     total := float64(time.Now().Sub(acc.values.start)) / float64(time.Second)
-    bps = float64(acc.values.bytes) / total
+    if total > 0 {
+        bps = float64(acc.values.bytes) / total
+    } else {
+        bps = 0.0
+    }
     current = acc.values.avg
     return
 }
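The two hunks above are the same pattern: when no time has elapsed, dividing by zero would turn the rate into NaN or +Inf and poison the moving average. A standalone sketch of the guard (illustrative only, not the rclone code):

```go
package main

import (
	"fmt"
	"time"
)

// rate returns bytes/second, falling back to 0 when no time has elapsed
// so the caller never sees NaN or +Inf.
func rate(bytes int64, elapsed time.Duration) float64 {
	seconds := elapsed.Seconds()
	if seconds <= 0 {
		return 0
	}
	return float64(bytes) / seconds
}

func main() {
	fmt.Println(rate(1024, time.Second)) // 1024
	fmt.Println(rate(1024, 0))           // 0 instead of +Inf
}
```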
@@ -520,14 +527,11 @@ func (acc *Account) rcStats() (out rc.Params) {
     out["speed"] = spd
     out["speedAvg"] = cur

-    eta, etaok := acc.eta()
-    out["eta"] = nil
-    if etaok {
-        if eta > 0 {
-            out["eta"] = eta.Seconds()
-        } else {
-            out["eta"] = 0
-        }
-    }
+    eta, etaOK := acc.eta()
+    if etaOK {
+        out["eta"] = eta.Seconds()
+    } else {
+        out["eta"] = nil
+    }
     out["name"] = acc.name

@@ -90,7 +90,7 @@ func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
     s.mu.RLock()

     ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
-    ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.Speed())
+    ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.speed())
     ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
     ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
     ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))
@@ -65,9 +65,19 @@ func NewStats(ctx context.Context) *StatsInfo {

 // RemoteStats returns stats for rc
 func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
+    // NB if adding values here - make sure you update the docs in
+    // stats_groups.go
+
     out = make(rc.Params)
+
+    ts := s.calculateTransferStats()
+    out["totalChecks"] = ts.totalChecks
+    out["totalTransfers"] = ts.totalTransfers
+    out["totalBytes"] = ts.totalBytes
+    out["transferTime"] = ts.transferTime
+    out["speed"] = ts.speed
+
     s.mu.RLock()
-    out["speed"] = s.Speed()
     out["bytes"] = s.bytes
     out["errors"] = s.errors
     out["fatalError"] = s.fatalError
@@ -77,9 +87,15 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
     out["deletes"] = s.deletes
     out["deletedDirs"] = s.deletedDirs
     out["renames"] = s.renames
-    out["transferTime"] = s.totalDuration().Seconds()
     out["elapsedTime"] = time.Since(startTime).Seconds()
+    eta, etaOK := eta(s.bytes, ts.totalBytes, ts.speed)
+    if etaOK {
+        out["eta"] = eta.Seconds()
+    } else {
+        out["eta"] = nil
+    }
     s.mu.RUnlock()

     if !s.checking.empty() {
         out["checking"] = s.checking.remotes()
     }
@@ -89,11 +105,14 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
     if s.errors > 0 {
         out["lastError"] = s.lastError.Error()
     }

     return out, nil
 }

 // Speed returns the average speed of the transfer in bytes/second
-func (s *StatsInfo) Speed() float64 {
+//
+// Call with lock held
+func (s *StatsInfo) speed() float64 {
     dt := s.totalDuration()
     dtSeconds := dt.Seconds()
     speed := 0.0
@@ -202,6 +221,9 @@ func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
         return 0, false
     }
     seconds := float64(remaining) / rate
+    if seconds < 0 {
+        seconds = 0
+    }
     return time.Second * time.Duration(seconds), true
 }

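With retries, `size` can exceed `total`, which makes `remaining` negative; the clamp keeps the ETA at zero instead of reporting a negative duration. A rough standalone sketch of the same calculation (not the rclone function itself):

```go
package main

import (
	"fmt"
	"time"
)

// eta estimates time remaining from bytes done, total bytes and a rate in
// bytes/second; ok is false when no sensible estimate can be made.
func eta(size, total int64, rate float64) (time.Duration, bool) {
	if total <= 0 || size < 0 || rate <= 0 {
		return 0, false
	}
	remaining := total - size
	seconds := float64(remaining) / rate
	if seconds < 0 {
		// A retry can push size past total; report 0 rather than a negative ETA.
		seconds = 0
	}
	return time.Second * time.Duration(seconds), true
}

func main() {
	d, ok := eta(90, 100, 10) // 1s left
	fmt.Println(d, ok)
	d, ok = eta(110, 100, 10) // overshoot clamps to 0
	fmt.Println(d, ok)
}
```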
@@ -227,36 +249,60 @@ func percent(a int64, b int64) string {
     return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
 }

-// String convert the StatsInfo to a string for printing
-func (s *StatsInfo) String() string {
+// returned from calculateTransferStats
+type transferStats struct {
+    totalChecks    int64
+    totalTransfers int64
+    totalBytes     int64
+    transferTime   float64
+    speed          float64
+}
+
+// calculateTransferStats calculates some addtional transfer stats not
+// stored directly in StatsInfo
+func (s *StatsInfo) calculateTransferStats() (ts transferStats) {
     // checking and transferring have their own locking so read
     // here before lock to prevent deadlock on GetBytes
     transferring, checking := s.transferring.count(), s.checking.count()
     transferringBytesDone, transferringBytesTotal := s.transferring.progress(s)
+
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+
+    ts.totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
+    ts.totalTransfers = int64(s.transferQueue) + s.transfers + int64(transferring)
+    // note that s.bytes already includes transferringBytesDone so
+    // we take it off here to avoid double counting
+    ts.totalBytes = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
+
+    dt := s.totalDuration()
+    ts.transferTime = dt.Seconds()
+    ts.speed = 0.0
+    if dt > 0 {
+        ts.speed = float64(s.bytes) / ts.transferTime
+    }
+
+    return ts
+}
+
+// String convert the StatsInfo to a string for printing
+func (s *StatsInfo) String() string {
+    // NB if adding more stats in here, remember to add them into
+    // RemoteStats() too.
+
+    ts := s.calculateTransferStats()
+
     s.mu.RLock()

     elapsedTime := time.Since(startTime)
     elapsedTimeSecondsOnly := elapsedTime.Truncate(time.Second/10) % time.Minute
-    dt := s.totalDuration()
-    dtSeconds := dt.Seconds()
-    speed := 0.0
-    if dt > 0 {
-        speed = float64(s.bytes) / dtSeconds
-    }

-    displaySpeed := speed
+    displaySpeed := ts.speed
     if s.ci.DataRateUnit == "bits" {
         displaySpeed *= 8
     }

     var (
-        totalChecks   = int64(s.checkQueue) + s.checks + int64(checking)
-        totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
-        // note that s.bytes already includes transferringBytesDone so
-        // we take it off here to avoid double counting
-        totalSize   = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
-        currentSize = s.bytes
         buf          = &bytes.Buffer{}
         xfrchkString = ""
         dateString   = ""
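A quick numeric illustration of the double-counting comment above: if 3 GiB are still queued, 5 GiB have already been accounted in s.bytes, and a 2 GiB file is half transferred, the 1 GiB already done is inside s.bytes and must not be added again (a toy calculation with made-up numbers, not rclone code):

```go
package main

import "fmt"

func main() {
	const GiB = int64(1) << 30

	transferQueueSize := 3 * GiB      // not started yet
	bytes := 5 * GiB                  // everything accounted so far
	transferringBytesTotal := 2 * GiB // size of the in-flight file
	transferringBytesDone := 1 * GiB  // already counted inside bytes

	// Same shape as ts.totalBytes above: subtract the in-flight bytes that
	// are already part of "bytes" to avoid counting them twice.
	totalBytes := transferQueueSize + bytes + transferringBytesTotal - transferringBytesDone
	fmt.Println(totalBytes / GiB) // 9 GiB expected in total, not 10
}
```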
@@ -266,11 +312,11 @@ func (s *StatsInfo) String() string {
         _, _ = fmt.Fprintf(buf, "\nTransferred: ")
     } else {
         xfrchk := []string{}
-        if totalTransfer > 0 && s.transferQueue > 0 {
-            xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
+        if ts.totalTransfers > 0 && s.transferQueue > 0 {
+            xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, ts.totalTransfers))
         }
-        if totalChecks > 0 && s.checkQueue > 0 {
-            xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
+        if ts.totalChecks > 0 && s.checkQueue > 0 {
+            xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, ts.totalChecks))
         }
         if len(xfrchk) > 0 {
             xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
@@ -284,16 +330,16 @@ func (s *StatsInfo) String() string {
     _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
         dateString,
         fs.SizeSuffix(s.bytes),
-        fs.SizeSuffix(totalSize).Unit("Bytes"),
-        percent(s.bytes, totalSize),
+        fs.SizeSuffix(ts.totalBytes).Unit("Bytes"),
+        percent(s.bytes, ts.totalBytes),
         fs.SizeSuffix(displaySpeed).Unit(strings.Title(s.ci.DataRateUnit)+"/s"),
-        etaString(currentSize, totalSize, speed),
+        etaString(s.bytes, ts.totalBytes, ts.speed),
         xfrchkString,
     )

     if s.ci.ProgressTerminalTitle {
         // Writes ETA to the terminal title
-        terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
+        terminal.WriteTerminalTitle("ETA: " + etaString(s.bytes, ts.totalBytes, ts.speed))
     }

     if !s.ci.StatsOneLine {
@@ -314,9 +360,9 @@ func (s *StatsInfo) String() string {
         _, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
             s.errors, errorDetails)
     }
-    if s.checks != 0 || totalChecks != 0 {
+    if s.checks != 0 || ts.totalChecks != 0 {
         _, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
-            s.checks, totalChecks, percent(s.checks, totalChecks))
+            s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks))
     }
     if s.deletes != 0 || s.deletedDirs != 0 {
         _, _ = fmt.Fprintf(buf, "Deleted: %10d (files), %d (dirs)\n", s.deletes, s.deletedDirs)
@@ -324,9 +370,9 @@ func (s *StatsInfo) String() string {
     if s.renames != 0 {
         _, _ = fmt.Fprintf(buf, "Renamed: %10d\n", s.renames)
     }
-    if s.transfers != 0 || totalTransfer != 0 {
+    if s.transfers != 0 || ts.totalTransfers != 0 {
         _, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
-            s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
+            s.transfers, ts.totalTransfers, percent(s.transfers, ts.totalTransfers))
     }
     _, _ = fmt.Fprintf(buf, "Elapsed time: %10ss\n", strings.TrimRight(elapsedTime.Truncate(time.Minute).String(), "0s")+fmt.Sprintf("%.1f", elapsedTimeSecondsOnly.Seconds()))
 }
@@ -86,18 +86,22 @@ Returns the following values:

 ` + "```" + `
 {
-    "speed": average speed in bytes/sec since start of the process,
-    "bytes": total transferred bytes since the start of the process,
+    "bytes": total transferred bytes since the start of the group,
+    "checks": number of files checked,
+    "deletes" : number of files deleted,
+    "elapsedTime": time in floating point seconds since rclone was started,
     "errors": number of errors,
-    "fatalError": whether there has been at least one FatalError,
-    "retryError": whether there has been at least one non-NoRetryError,
-    "checks": number of checked files,
-    "transfers": number of transferred files,
-    "deletes" : number of deleted files,
-    "renames" : number of renamed files,
+    "eta": estimated time in seconds until the group completes,
+    "fatalError": boolean whether there has been at least one fatal error,
+    "lastError": last error string,
+    "renames" : number of files renamed,
+    "retryError": boolean showing whether there has been at least one non-NoRetryError,
+    "speed": average speed in bytes/sec since start of the group,
+    "totalBytes": total number of bytes in the group,
+    "totalChecks": total number of checks in the group,
+    "totalTransfers": total number of transfers in the group,
     "transferTime" : total time spent on running jobs,
-    "elapsedTime": time in seconds since the start of the process,
-    "lastError": last occurred error,
+    "transfers": number of transferred files,
     "transferring": an array of currently active file transfers:
         [
             {
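A hedged sketch of reading these group stats over the rc API: it assumes an rc server at the default `localhost:5572` with auth disabled, calls core/stats, and picks out a few of the new fields (totalBytes, totalTransfers, eta):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Post("http://localhost:5572/core/stats", "application/json",
		bytes.NewBufferString("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	// eta is null until enough is known to estimate it, so it may decode as nil.
	fmt.Println("totalBytes:", stats["totalBytes"])
	fmt.Println("totalTransfers:", stats["totalTransfers"])
	fmt.Println("eta:", stats["eta"])
}
```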
fs/cache/cache.go (59 lines changed)
@@ -7,18 +7,31 @@ import (
     "sync"

     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/lib/cache"
 )

 var (
-    c     = cache.New()
+    once  sync.Once // creation
+    c     *cache.Cache
     mu    sync.Mutex            // mutex to protect remap
     remap = map[string]string{} // map user supplied names to canonical names
 )

+// Create the cache just once
+func createOnFirstUse() {
+    once.Do(func() {
+        ci := fs.GetConfig(context.Background())
+        c = cache.New()
+        c.SetExpireDuration(ci.FsCacheExpireDuration)
+        c.SetExpireInterval(ci.FsCacheExpireInterval)
+    })
+}
+
 // Canonicalize looks up fsString in the mapping from user supplied
 // names to canonical names and return the canonical form
 func Canonicalize(fsString string) string {
+    createOnFirstUse()
     mu.Lock()
     canonicalName, ok := remap[fsString]
     mu.Unlock()
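The createOnFirstUse pattern above defers cache construction until the global config is available. A generic sketch of the same lazy, thread-safe initialisation with sync.Once (illustrative only, not the rclone code):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct{ entries map[string]string }

var (
	once sync.Once
	reg  *registry
)

// get initialises the shared registry exactly once, even when called from
// many goroutines, and returns the same instance every time.
func get() *registry {
	once.Do(func() {
		reg = &registry{entries: map[string]string{}}
	})
	return reg
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = get() // safe: initialisation happens once
		}()
	}
	wg.Wait()
	fmt.Printf("%p == %p\n", get(), get())
}
```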
@@ -42,10 +55,11 @@ func addMapping(fsString, canonicalName string) {
 // GetFn gets an fs.Fs named fsString either from the cache or creates
 // it afresh with the create function
 func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
-    fsString = Canonicalize(fsString)
+    createOnFirstUse()
+    canonicalFsString := Canonicalize(fsString)
     created := false
-    value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) {
-        f, err = create(ctx, fsString)
+    value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
+        f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
         ok = err == nil || err == fs.ErrorIsFile
         created = ok
         return f, ok, err
@@ -57,19 +71,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
     // Check we stored the Fs at the canonical name
     if created {
         canonicalName := fs.ConfigString(f)
-        if canonicalName != fsString {
+        if canonicalName != canonicalFsString {
             // Note that if err == fs.ErrorIsFile at this moment
             // then we can't rename the remote as it will have the
             // wrong error status, we need to add a new one.
             if err == nil {
-                fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
-                value, found := c.Rename(fsString, canonicalName)
+                fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
+                value, found := c.Rename(canonicalFsString, canonicalName)
                 if found {
                     f = value.(fs.Fs)
                 }
-                addMapping(fsString, canonicalName)
+                addMapping(canonicalFsString, canonicalName)
             } else {
-                fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
+                fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
                 Put(canonicalName, f)
             }
         }
@@ -79,6 +93,7 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context

 // Pin f into the cache until Unpin is called
 func Pin(f fs.Fs) {
+    createOnFirstUse()
     c.Pin(fs.ConfigString(f))
 }

@@ -96,12 +111,20 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {

 // Unpin f from the cache
 func Unpin(f fs.Fs) {
+    createOnFirstUse()
     c.Pin(fs.ConfigString(f))
 }

 // Get gets an fs.Fs named fsString either from the cache or creates it afresh
 func Get(ctx context.Context, fsString string) (f fs.Fs, err error) {
-    return GetFn(ctx, fsString, fs.NewFs)
+    // If we are making a long lived backend which lives longer
+    // than this request, we want to disconnect it from the
+    // current context and in particular any WithCancel contexts,
+    // but we want to preserve the config embedded in the context.
+    newCtx := context.Background()
+    newCtx = fs.CopyConfig(newCtx, ctx)
+    newCtx = filter.CopyConfig(newCtx, ctx)
+    return GetFn(newCtx, fsString, fs.NewFs)
 }

 // GetArr gets []fs.Fs from []fsStrings either from the cache or creates it afresh
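Why disconnect from the caller's context: a backend placed in the long-lived cache must not die when the rc request that created it is cancelled. A small sketch of the idea, keeping values while dropping cancellation (generic context code, not the rclone helpers):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type configKey struct{}

func main() {
	// Request-scoped context: carries config and will be cancelled soon.
	reqCtx, cancel := context.WithCancel(context.WithValue(context.Background(), configKey{}, "my-config"))

	// Long-lived context: fresh Background with just the value copied over,
	// so cancelling the request does not cancel the cached backend.
	longCtx := context.WithValue(context.Background(), configKey{}, reqCtx.Value(configKey{}))

	cancel() // the rc request finishes

	select {
	case <-longCtx.Done():
		fmt.Println("long-lived context was cancelled (unexpected)")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("long-lived context survives; config:", longCtx.Value(configKey{}))
	}
}
```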
@@ -119,12 +142,28 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {

 // Put puts an fs.Fs named fsString into the cache
 func Put(fsString string, f fs.Fs) {
+    createOnFirstUse()
     canonicalName := fs.ConfigString(f)
     c.Put(canonicalName, f)
     addMapping(fsString, canonicalName)
 }

+// ClearConfig deletes all entries which were based on the config name passed in
+//
+// Returns number of entries deleted
+func ClearConfig(name string) (deleted int) {
+    createOnFirstUse()
+    return c.DeletePrefix(name + ":")
+}
+
 // Clear removes everything from the cache
 func Clear() {
+    createOnFirstUse()
     c.Clear()
 }
+
+// Entries returns the number of entries in the cache
+func Entries() int {
+    createOnFirstUse()
+    return c.Entries()
+}
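ClearConfig keys off the `name + ":"` prefix used by cached remote strings, so deleting a config section can drop every cached backend built from it. The sketch below mimics that prefix match with a plain map standing in for the real cache; the remote names are made up:

```go
package main

import (
	"fmt"
	"strings"
)

// clearConfig mimics the DeletePrefix behaviour: drop every cache entry whose
// key starts with name + ":" and report how many were removed.
func clearConfig(entries map[string]bool, name string) int {
	deleted := 0
	for key := range entries {
		if strings.HasPrefix(key, name+":") {
			delete(entries, key)
			deleted++
		}
	}
	return deleted
}

func main() {
	cache := map[string]bool{
		"mydrive:":          true,
		"mydrive:photos":    true,
		"otherremote:stuff": true,
	}
	fmt.Println(clearConfig(cache, "mydrive")) // 2
	fmt.Println(len(cache))                    // 1 entry left
}
```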
|||||||
59
fs/cache/cache_test.go
vendored
59
fs/cache/cache_test.go
vendored
@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
|
|||||||
panic("unreachable")
|
panic("unreachable")
|
||||||
}
|
}
|
||||||
cleanup := func() {
|
cleanup := func() {
|
||||||
c.Clear()
|
Clear()
|
||||||
}
|
}
|
||||||
return cleanup, create
|
return cleanup, create
|
||||||
}
|
}
|
||||||
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
     cleanup, create := mockNewFs(t)
     defer cleanup()

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())

     f, err := GetFn(context.Background(), "mock:/", create)
     require.NoError(t, err)

-    assert.Equal(t, 1, c.Entries())
+    assert.Equal(t, 1, Entries())

     f2, err := GetFn(context.Background(), "mock:/", create)
     require.NoError(t, err)
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
     cleanup, create := mockNewFs(t)
     defer cleanup()

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())

     f, err := GetFn(context.Background(), "mock:/file.txt", create)
     require.Equal(t, fs.ErrorIsFile, err)
     require.NotNil(t, f)

-    assert.Equal(t, 2, c.Entries())
+    assert.Equal(t, 2, Entries())

     f2, err := GetFn(context.Background(), "mock:/file.txt", create)
     require.Equal(t, fs.ErrorIsFile, err)
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
     cleanup, create := mockNewFs(t)
     defer cleanup()

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())

     f, err := GetFn(context.Background(), "mock:file.txt", create)
     require.Equal(t, fs.ErrorIsFile, err)
     require.NotNil(t, f)

-    assert.Equal(t, 2, c.Entries())
+    assert.Equal(t, 2, Entries())

     f2, err := GetFn(context.Background(), "mock:file.txt", create)
     require.Equal(t, fs.ErrorIsFile, err)
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
     cleanup, create := mockNewFs(t)
     defer cleanup()

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())

     f, err := GetFn(context.Background(), "mock:/error", create)
     require.Equal(t, errSentinel, err)
     require.Equal(t, nil, f)

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())
 }

 func TestPut(t *testing.T) {
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {

     f := mockfs.NewFs(context.Background(), "mock", "/alien")

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())

     Put("mock:/alien", f)

-    assert.Equal(t, 1, c.Entries())
+    assert.Equal(t, 1, Entries())

     fNew, err := GetFn(context.Background(), "mock:/alien", create)
     require.NoError(t, err)
     require.Equal(t, f, fNew)

-    assert.Equal(t, 1, c.Entries())
+    assert.Equal(t, 1, Entries())

     // Check canonicalisation

@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, f, fNew)

-    assert.Equal(t, 1, c.Entries())
+    assert.Equal(t, 1, Entries())

 }

@@ -166,6 +166,22 @@ func TestPin(t *testing.T) {
     Unpin(f2)
 }

+func TestClearConfig(t *testing.T) {
+    cleanup, create := mockNewFs(t)
+    defer cleanup()
+
+    assert.Equal(t, 0, Entries())
+
+    _, err := GetFn(context.Background(), "mock:/file.txt", create)
+    require.Equal(t, fs.ErrorIsFile, err)
+
+    assert.Equal(t, 2, Entries()) // file + parent
+
+    assert.Equal(t, 2, ClearConfig("mock"))
+
+    assert.Equal(t, 0, Entries())
+}
+
 func TestClear(t *testing.T) {
     cleanup, create := mockNewFs(t)
     defer cleanup()
@@ -174,9 +190,22 @@ func TestClear(t *testing.T) {
     _, err := GetFn(context.Background(), "mock:/", create)
     require.NoError(t, err)

-    assert.Equal(t, 1, c.Entries())
+    assert.Equal(t, 1, Entries())

     Clear()

-    assert.Equal(t, 0, c.Entries())
+    assert.Equal(t, 0, Entries())
+}
+
+func TestEntries(t *testing.T) {
+    cleanup, create := mockNewFs(t)
+    defer cleanup()
+
+    assert.Equal(t, 0, Entries())
+
+    // Create something
+    _, err := GetFn(context.Background(), "mock:/", create)
+    require.NoError(t, err)
+
+    assert.Equal(t, 1, Entries())
 }
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user