Mirror of https://github.com/rclone/rclone.git (synced 2026-01-06 02:23:24 +00:00)

Compare commits: fix-5951-m...fix-webdav (221 commits)
221 commits in this range, from dfc5b0460b through 507020f408.
.github/workflows/build.yml (vendored, 111 changes)
@@ -25,23 +25,23 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['mac_amd64', 'mac_arm64']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']

include:
# - job_name: linux
# os: ubuntu-latest
# go: '1.17.x'
# gotags: cmount
# build_flags: '-include "^linux/"'
# check: true
# quicktest: true
# racequicktest: true
# librclonetest: true
# deploy: true
- job_name: linux
os: ubuntu-latest
go: '1.18.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true

- job_name: mac_amd64
os: macos-11
go: '1.17.x'
go: '1.18.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -50,50 +50,39 @@ jobs:

- job_name: mac_arm64
os: macos-11
go: '1.17.x'
go: '1.18.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true

# - job_name: windows_amd64
# os: windows-latest
# go: '1.17.x'
# gotags: cmount
# build_flags: '-include "^windows/amd64" -cgo'
# build_args: '-buildmode exe'
# quicktest: true
# racequicktest: true
# deploy: true
- job_name: windows
os: windows-latest
go: '1.18.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
build_args: '-buildmode exe'
quicktest: true
deploy: true

# - job_name: windows_386
# os: windows-latest
# go: '1.17.x'
# gotags: cmount
# goarch: '386'
# cgo: '1'
# build_flags: '-include "^windows/386" -cgo'
# build_args: '-buildmode exe'
# quicktest: true
# deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.18.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true

# - job_name: other_os
# os: ubuntu-latest
# go: '1.17.x'
# build_flags: '-exclude "^(windows/|darwin/|linux/)"'
# compile_all: true
# deploy: true
- job_name: go1.16
os: ubuntu-latest
go: '1.16.x'
quicktest: true
racequicktest: true

# - job_name: go1.15
# os: ubuntu-latest
# go: '1.15.x'
# quicktest: true
# racequicktest: true

# - job_name: go1.16
# os: ubuntu-latest
# go: '1.16.x'
# quicktest: true
# racequicktest: true
- job_name: go1.17
os: ubuntu-latest
go: '1.17.x'
quicktest: true
racequicktest: true

name: ${{ matrix.job_name }}

@@ -110,6 +99,7 @@ jobs:
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true

- name: Set environment variables
shell: bash
@@ -177,6 +167,11 @@ jobs:
run: |
make

- name: Rclone version
shell: bash
run: |
rclone version

- name: Run tests
shell: bash
run: |
@@ -245,14 +240,14 @@ jobs:
fetch-depth: 0

# Upgrade together with NDK version
- name: Set up Go 1.16
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.16
go-version: 1.18.x

# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true

- name: Go module cache
uses: actions/cache@v2
@@ -273,8 +268,8 @@ jobs:

- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init

- name: arm-v7a gomobile build
@@ -283,7 +278,7 @@ jobs:
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
@@ -296,7 +291,7 @@ jobs:
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -309,7 +304,7 @@ jobs:
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
@@ -322,7 +317,7 @@ jobs:
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -20,7 +20,7 @@ jobs:
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

@@ -28,7 +28,7 @@ jobs:
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -50,7 +50,7 @@ jobs:
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Caleb Case | @calebcase | storj backend |

**This is a work in progress Draft**
MANUAL.html (generated, 5267 changes): file diff suppressed because it is too large.
MANUAL.txt (generated, 7550 changes): file diff suppressed because it is too large.
Makefile (10 changes)
@@ -97,7 +97,7 @@ release_dep_linux:

# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest

# Update dependencies
showupdates:
@@ -245,18 +245,18 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

winzip:
zip -9 rclone-$(TAG).zip rclone.exe
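For reference, the new startdev and startstable recipes above generate fs/versiontag.go along the following lines; the version string shown here is only a placeholder, since the Makefile substitutes $(NEXT_VERSION) or $(NEXT_PATCH_VERSION) at release time:

package fs

// VersionTag of rclone
var VersionTag = "v1.59.0" // placeholder value; the Makefile writes the real next version here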
README.md (11 changes)
@@ -1,4 +1,5 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -20,14 +21,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
## Storage providers

* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
@@ -38,6 +44,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
@@ -65,8 +72,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -22,12 +22,14 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -39,9 +41,9 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
@@ -26,6 +26,7 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -43,8 +44,9 @@ import (
const (
minSleep = 10 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
maxUploadParts = 50000 // maximum allowed number of parts/blocks in a multi-part upload
modTimeKey = "mtime"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
@@ -371,15 +373,9 @@ func (o *Object) split() (container, containerPath string) {

// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
case string(azblob.AccessTierHot),
string(azblob.AccessTierCool),
string(azblob.AccessTierArchive):
// valid cases
return true
default:
return false
}
return strings.EqualFold(tier, string(azblob.AccessTierHot)) ||
strings.EqualFold(tier, string(azblob.AccessTierCool)) ||
strings.EqualFold(tier, string(azblob.AccessTierArchive))
}

// validatePublicAccess checks if azureblob supports use supplied public access level
@@ -612,7 +608,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.UseMSI:
var token adal.Token
var userMSI *userMSI = &userMSI{}
var userMSI = &userMSI{}
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
@@ -1689,8 +1685,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}

uploadParts := int64(maxUploadParts)
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
BufferSize: int(partSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
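The Update hunk above switches from using the configured chunk size directly to sizing parts with chunksize.Calculator, so very large objects no longer overflow the 50000-block limit. As an illustrative sketch only (assumed names, not the actual fs/chunksize implementation), a calculator of this kind grows the chunk size until the part count fits:

// suggestedChunkSize is a hypothetical helper showing the idea behind a
// chunk-size calculator: keep the configured default unless the object is so
// large that more than maxParts chunks would be needed, in which case grow
// the chunk size until maxParts chunks can cover the whole object.
func suggestedChunkSize(size int64, maxParts int, defaultChunkSize int64) int64 {
	chunkSize := defaultChunkSize
	if size < 0 {
		return chunkSize // unknown size: keep the configured default
	}
	for int64(maxParts)*chunkSize < size {
		chunkSize *= 2 // double until the object fits in maxParts chunks
	}
	return chunkSize
}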
@@ -61,3 +61,25 @@ func TestServicePrincipalFileFailure(t *testing.T) {
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}

func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}

for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
}
}
@@ -64,7 +64,8 @@ const (

// Globals
var (
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
)

// Register with Fs
@@ -106,6 +107,11 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "version_at",
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: fs.Time{},
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
@@ -211,6 +217,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -696,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden {
if hidden || f.opt.VersionAt.IsSet() {
opts.Path = "/b2_list_file_versions"
}

lastFileName := ""

for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
@@ -728,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if addBucket {
remote = path.Join(bucket, remote)
}

if f.opt.VersionAt.IsSet() {
if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
// Ignore versions that were created after the specified time
continue
}

if file.Name == lastFileName {
// Ignore versions before the already returned version
continue
}
}

// Send object
lastFileName = file.Name
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
@@ -1828,6 +1852,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
size := src.Size()

bucket, bucketPath := o.split()
@@ -1983,6 +2010,9 @@ func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(ctx, o.id, bucketPath)
}
@@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
@@ -88,21 +89,19 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
backend/cache/cache.go (vendored, 8 changes)
@@ -394,7 +394,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
rps := rate.Inf
if opt.Rps > 0 {
rps = rate.Limit(float64(opt.Rps))
}
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)

f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
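The NewFs hunk above guards against a zero or negative rps setting by falling back to rate.Inf, which in golang.org/x/time/rate means the limiter never blocks. A minimal sketch of the pattern, with rps and totalWorkers standing in for opt.Rps and opt.TotalWorkers:

// newRateLimiter mirrors the pattern used above (golang.org/x/time/rate).
func newRateLimiter(rps int, totalWorkers int) *rate.Limiter {
	limit := rate.Inf // rate.Inf disables rate limiting entirely
	if rps > 0 {
		limit = rate.Limit(float64(rps))
	}
	// Burst is set to the number of workers, as in the cache backend.
	return rate.NewLimiter(limit, totalWorkers)
}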
@@ -1743,7 +1747,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -515,7 +515,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {

strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex)

@@ -524,7 +524,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
if numDigits > 1 {
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
}
strFmt := strings.Replace(pattern, "%", "%%", -1)
strFmt := strings.ReplaceAll(pattern, "%", "%%")
strFmt = strings.Replace(strFmt, "*", "%s", 1)
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -1895,7 +1895,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.base.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -1904,7 +1904,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.base.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -401,6 +401,10 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
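The isCompressible hunk above adds the missing Close before the ratio is measured, since the gzip footer is only flushed on Close. As a rough, self-contained sketch of the same probe (assumed names, not the exact backend/compress implementation; imports "bytes" and "compress/gzip"):

// looksCompressible gzips a sample into a buffer and reports whether the
// original-to-compressed ratio beats a threshold.
func looksCompressible(sample []byte, minRatio float64) (bool, error) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(sample); err != nil {
		return false, err
	}
	if err := w.Close(); err != nil { // Close flushes the gzip footer before measuring
		return false, err
	}
	ratio := float64(len(sample)) / float64(buf.Len())
	return ratio > minRatio, nil
}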
@@ -626,9 +630,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
}
return nil, err
}
@@ -900,7 +906,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp: not supported by underlying remote")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -909,7 +915,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("can't About: not supported by underlying remote")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -597,7 +597,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -606,7 +606,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
@@ -70,7 +70,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -84,7 +84,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -299,6 +299,17 @@ a non root folder as its starting point.
Default: true,
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "copy_shortcut_content",
Default: false,
Help: `Server side copy contents of shortcuts instead of the shortcut.

When doing server side copies, normally rclone will copy shortcuts as
shortcuts.

If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
@@ -542,6 +553,14 @@ Google don't document so it may break in the future.
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: "skip_dangling_shortcuts",
Help: `If set skip dangling shortcut files.

If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
@@ -578,6 +597,7 @@ type Options struct {
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
@@ -604,6 +624,7 @@ type Options struct {
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -639,6 +660,7 @@ type baseObject struct {
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects
}
type documentObject struct {
baseObject
@@ -808,8 +830,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if title != "" {
searchTitle := f.opt.Enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)

var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -906,6 +928,11 @@ OUTER:
if err != nil {
return false, fmt.Errorf("list: %w", err)
}
// leave the dangling shortcut out of the listings
// we've already logged about the dangling shortcut in resolveShortcut
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
continue
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
@@ -1293,12 +1320,16 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
}
}
return &Object{
o := &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey
}
return o
}

// newDocumentObject creates an fs.Object for a google docs drive.File
@@ -1571,6 +1602,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}

// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}

// else return empty
return "", "", isDocument
}
@@ -1581,6 +1621,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -2374,16 +2422,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}

// get the ID of the thing to copy - this is the shortcut if available
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only

id := shortcutID(srcObj.id)

if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}

var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
copy := f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
info, err = copy.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
@@ -3480,6 +3536,14 @@ func (o *baseObject) Storable() bool {
return true
}

// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
// object if required.
func (o *baseObject) addResourceKey(header http.Header) {
if o.resourceKey != nil {
header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
}
}

// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
@@ -3495,6 +3559,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
o.addResourceKey(req.Header)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
@@ -422,11 +422,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)

dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
dir := t.TempDir()

checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -491,19 +487,11 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)

tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempDir1 := t.TempDir()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)

tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempDir2 := t.TempDir()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)
@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -137,7 +137,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
return complete, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
@@ -199,26 +199,11 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
fs.Debugf(b.f, "Committing %s", desc)

// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}

// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}

// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {

@@ -1269,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("about failed: %w", err)
return nil, err
}
var total uint64
if q.Allocation != nil {
@@ -1370,10 +1370,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

if timeout < 30 {
timeout = 30
fs.Debugf(f, "Increasing poll interval to minimum 30s")
}

if timeout > 480 {
timeout = 480
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}

err = f.pacer.Call(func() (bool, error) {
@@ -1650,13 +1652,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
}

chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
return false, err
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
if err != nil {
// Check for incorrect offset error and retry with new offset
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
fs.Debugf(o, "%s: chunk received OK - continuing", what)
return false, nil
} else if skip > chunkSize {
// This error should never happen
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
}
// Skip the sent data on next retry
cursor.Offset = uint64(int64(cursor.Offset) + delta)
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
}
}
}
return err != nil, err
})
if err != nil {
@@ -1760,7 +1786,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
return shouldRetry(ctx, err)
})
}
@@ -100,6 +100,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
@@ -184,6 +189,7 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
@@ -338,6 +344,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}
@@ -24,6 +24,7 @@ import (
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -65,7 +66,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -182,15 +183,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo",
|
||||
}, {
|
||||
Value: "asia-northeast2",
|
||||
Help: "Osaka",
|
||||
}, {
|
||||
Value: "asia-northeast3",
|
||||
Help: "Seoul",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai",
|
||||
}, {
|
||||
Value: "asia-south2",
|
||||
Help: "Delhi",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore",
|
||||
}, {
|
||||
Value: "asia-southeast2",
|
||||
Help: "Jakarta",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney",
|
||||
}, {
|
||||
Value: "australia-southeast2",
|
||||
Help: "Melbourne",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland",
|
||||
@@ -206,6 +222,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands",
|
||||
}, {
|
||||
Value: "europe-west6",
|
||||
Help: "Zürich",
|
||||
}, {
|
||||
Value: "europe-central2",
|
||||
Help: "Warsaw",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa",
|
||||
@@ -221,6 +243,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California",
|
||||
}, {
|
||||
Value: "us-west3",
|
||||
Help: "Salt Lake City",
|
||||
}, {
|
||||
Value: "us-west4",
|
||||
Help: "Las Vegas",
|
||||
}, {
|
||||
Value: "northamerica-northeast1",
|
||||
Help: "Montréal",
|
||||
}, {
|
||||
Value: "northamerica-northeast2",
|
||||
Help: "Toronto",
|
||||
}, {
|
||||
Value: "southamerica-east1",
|
||||
Help: "São Paulo",
|
||||
}, {
|
||||
Value: "southamerica-west1",
|
||||
Help: "Santiago",
|
||||
}, {
|
||||
Value: "asia1",
|
||||
Help: "Dual region: asia-northeast1 and asia-northeast2.",
|
||||
}, {
|
||||
Value: "eur4",
|
||||
Help: "Dual region: europe-north1 and europe-west4.",
|
||||
}, {
|
||||
Value: "nam4",
|
||||
Help: "Dual region: us-central1 and us-east1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -247,6 +296,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_compressed",
|
||||
Help: `If set this will download compressed objects as-is.
|
||||
|
||||
It is possible to upload objects to GCS with "Content-Encoding: gzip"
|
||||
set. Normally rclone will transparently decompress these files on
|
||||
download. This means that rclone can't check the hash or the size of
|
||||
the file as both of these refer to the compressed object.
|
||||
|
||||
If this flag is set then rclone will download files with
|
||||
"Content-Encoding: gzip" as they are received. This means that rclone
|
||||
can check the size and hash but the file contents will be compressed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -269,21 +342,24 @@ type Options struct {
|
||||
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
DownloadCompressed bool `config:"download_compressed"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache of bucket status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache of bucket status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
warnCompressed sync.Once // warn once about compressed files
|
||||
}
|
||||
|
||||
// Object describes a storage object
|
||||
@@ -297,6 +373,7 @@ type Object struct {
|
||||
bytes int64 // Bytes in the object
|
||||
modTime time.Time // Modified time of the object
|
||||
mimeType string
|
||||
gzipped bool // set if object has Content-Encoding: gzip
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -434,7 +511,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
|
||||
cache: bucket.NewCache(),
|
||||
}
|
||||
f.setRoot(root)
|
||||
@@ -792,6 +869,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
}, nil)
}

// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}
|
||||
|
||||
// Rmdir deletes the bucket if the fs is at the root
|
||||
//
|
||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||
@@ -825,7 +910,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.makeBucket(ctx, dstBucket)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -909,6 +994,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"

// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -947,6 +1033,15 @@ func (o *Object) setMetaData(info *storage.Object) {
} else {
o.modTime = modTime
}

// If gunzipping then size and md5sum are unknown
if o.gzipped && !o.fs.opt.DownloadCompressed {
o.bytes = -1
o.md5sum = ""
o.fs.warnCompressed.Do(func() {
fs.Logf(o.fs, "Decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-download-compressed to override")
})
}
}
|
||||
|
||||
// readObjectInfo reads the definition for an object
|
||||
@@ -1047,6 +1142,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && o.fs.opt.DownloadCompressed {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
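One detail worth noting about the hunk above: in Go's net/http, explicitly setting the Accept-Encoding header disables the Transport's transparent gzip decompression, which is why setting it here makes the object arrive exactly as stored. A small stand-alone sketch of that behaviour, assuming a placeholder URL rather than a real GCS media link:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder URL; in rclone this would be the object's MediaLink.
	req, err := http.NewRequest("GET", "https://example.com/object", nil)
	if err != nil {
		panic(err)
	}
	// Because we set Accept-Encoding ourselves, net/http will NOT transparently
	// gunzip the response, so the body is delivered exactly as stored.
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	n, _ := io.Copy(io.Discard, resp.Body)
	fmt.Printf("read %d bytes, Content-Encoding=%q\n", n, resp.Header.Get("Content-Encoding"))
}
```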
|
||||
@@ -1075,7 +1179,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.makeBucket(ctx, bucket)
|
||||
err := o.fs.checkBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
||||
for i := range items {
|
||||
item := &result.MediaItems[i]
|
||||
remote := item.Filename
|
||||
remote = strings.Replace(remote, "/", "/", -1)
|
||||
remote = strings.ReplaceAll(remote, "/", "/")
|
||||
err = fn(remote, item, false)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -202,7 +202,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
|
||||
for _, entry := range baseEntries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
hashEntries = append(hashEntries, f.wrapObject(x, nil))
|
||||
obj, err := f.wrapObject(x, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashEntries = append(hashEntries, obj)
|
||||
default:
|
||||
hashEntries = append(hashEntries, entry) // trash in - trash out
|
||||
}
|
||||
@@ -251,7 +255,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
if do := f.Fs.Features().PutStream; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutStream not supported")
|
||||
}
|
||||
@@ -261,7 +265,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
return nil, errors.New("PutUnchecked not supported")
|
||||
}
|
||||
@@ -278,7 +282,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if do := f.Fs.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return errors.New("CleanUp not supported")
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
@@ -286,7 +290,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
if do := f.Fs.Features().About; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
@@ -348,7 +352,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
return f.wrapObject(oResult, err), err
|
||||
return f.wrapObject(oResult, err)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
@@ -371,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
dir: false,
|
||||
fs: f,
|
||||
})
|
||||
return f.wrapObject(oResult, nil), nil
|
||||
return f.wrapObject(oResult, nil)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
||||
@@ -410,7 +414,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(ctx, remote)
|
||||
return f.wrapObject(o, err), err
|
||||
return f.wrapObject(o, err)
|
||||
}
|
||||
|
||||
//
|
||||
@@ -424,11 +428,15 @@ type Object struct {
|
||||
}
|
||||
|
||||
// Wrap base object into hasher object
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
||||
if err != nil || o == nil {
|
||||
return nil
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
|
||||
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Object{Object: o, f: f}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return &Object{Object: o, f: f}, nil
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
|
||||
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
|
||||
// Put data into the remote path with given modTime and size
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
var (
|
||||
o *Object
|
||||
o fs.Object
|
||||
common hash.Set
|
||||
rehash bool
|
||||
hashes hashMap
|
||||
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
|
||||
o = f.wrapObject(oResult, err)
|
||||
if o == nil {
|
||||
o, err = f.wrapObject(oResult, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
}
|
||||
if len(hashes) > 0 {
|
||||
err := o.putHashes(ctx, hashes)
|
||||
err := o.(*Object).putHashes(ctx, hashes)
|
||||
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
|
||||
}
|
||||
return o, err
|
||||
|
||||
@@ -119,7 +119,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
|
||||
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
backend/internetarchive/internetarchive.go (new file, 1109 lines; diff suppressed because it is too large)

backend/internetarchive/internetarchive_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Test internetarchive filesystem interface
|
||||
package internetarchive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/internetarchive"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestIA:lesmi-rclone-test/",
|
||||
NilObject: (*internetarchive.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -8,42 +8,69 @@ import (
|
||||
)
|
||||
|
||||
const (
// default time format for almost all request and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
// default time format historically used for all request and responses.
// Similar to time.RFC3339, but with an extra '-' in front of 'T',
// and no ':' separator in timezone offset. Some newer endpoints have
// moved to proper time.RFC3339 conformant format instead.
jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
)

// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// unmarshalXML turns XML into a Time
func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
return time.Time{}, err
}
if v == "" {
*t = Time(time.Time{})
return nil
return time.Time{}, nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
return newTime, nil
}
return time.Time{}, err
}

// JottaTime represents time values in the classic API using a custom RFC3339 like format
type JottaTime time.Time

// String returns JottaTime string in Jottacloud classic format
func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }

// UnmarshalXML turns XML into a JottaTime
func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
*t = JottaTime(tm)
return err
}

// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalXML turns a JottaTime into XML
func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}

// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
type Rfc3339Time time.Time

// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// String returns Rfc3339Time string in Jottacloud RFC3339 format
func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }

// UnmarshalXML turns XML into a Rfc3339Time
func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, time.RFC3339)
*t = Rfc3339Time(tm)
return err
}

// MarshalXML turns a Rfc3339Time into XML
func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}

// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}
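To make the difference between the two layouts concrete, here is a short, self-contained sketch that parses one timestamp in the classic Jottacloud format and one in RFC3339 using the formats defined above (the sample values are invented):

```go
package main

import (
	"fmt"
	"time"
)

const jottaTimeFormat = "2006-01-02-T15:04:05Z0700" // same layout as jottaTimeFormat above

func main() {
	// Invented sample values.
	classic := "2022-03-01-T10:15:30+0100" // extra '-' before 'T', no ':' in the offset
	rfc3339 := "2022-03-01T10:15:30+01:00"

	t1, err := time.Parse(jottaTimeFormat, classic)
	fmt.Println(t1, err)

	t2, err := time.Parse(time.RFC3339, rfc3339)
	fmt.Println(t2, err)
}
```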
|
||||
|
||||
// LoginToken is struct representing the login token generated in the WebUI
|
||||
type LoginToken struct {
|
||||
@@ -122,16 +149,11 @@ type AllocateFileResponse struct {
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
Path string `json:"path"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified int64 `json:"modified"`
|
||||
}
|
||||
|
||||
// DeviceRegistrationResponse is the response to registering a device
|
||||
@@ -338,9 +360,9 @@ type JottaFolder struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
Path string `xml:"path"`
|
||||
CreatedAt Time `xml:"created"`
|
||||
ModifiedAt Time `xml:"modified"`
|
||||
Updated Time `xml:"updated"`
|
||||
CreatedAt JottaTime `xml:"created"`
|
||||
ModifiedAt JottaTime `xml:"modified"`
|
||||
Updated JottaTime `xml:"updated"`
|
||||
Folders []JottaFolder `xml:"folders>folder"`
|
||||
Files []JottaFile `xml:"files>file"`
|
||||
}
|
||||
@@ -365,17 +387,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
|
||||
// JottaFile represents a Jottacloud file
|
||||
type JottaFile struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicURI string `xml:"publicURI"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
PublicURI string `xml:"publicURI"`
|
||||
PublicSharePath string `xml:"publicSharePath"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt JottaTime `xml:"currentRevision>created"`
|
||||
ModifiedAt JottaTime `xml:"currentRevision>modified"`
|
||||
Updated JottaTime `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
}
|
||||
|
||||
// Error is a custom Error for wrapping Jottacloud error responses
|
||||
|
||||
@@ -191,7 +191,7 @@ machines.`)
|
||||
m.Set("auth_code", "")
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_auth_code":
|
||||
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
|
||||
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
|
||||
m.Set("auth_code", authCode)
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_do_auth":
|
||||
@@ -519,7 +519,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
|
||||
values.Set("client_id", defaultClientID)
|
||||
values.Set("grant_type", "password")
|
||||
values.Set("password", loginToken.AuthToken)
|
||||
values.Set("scope", "offline_access+openid")
|
||||
values.Set("scope", "openid offline_access")
|
||||
values.Set("username", loginToken.Username)
|
||||
values.Encode()
|
||||
opts = rest.Opts{
|
||||
@@ -649,7 +649,7 @@ func errorHandler(resp *http.Response) error {
|
||||
|
||||
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
|
||||
func urlPathEscape(in string) string {
|
||||
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
|
||||
return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
|
||||
}
|
||||
|
||||
// filePathRaw returns an unescaped file path (f.root, file)
|
||||
@@ -932,26 +932,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
type listStreamTime time.Time
|
||||
|
||||
func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
if err := d.DecodeElement(&v, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*c = listStreamTime(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c listStreamTime) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
|
||||
}
|
||||
|
||||
func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
|
||||
func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {
|
||||
|
||||
type stats struct {
|
||||
Folders int `xml:"folders"`
|
||||
@@ -960,12 +941,12 @@ func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, files
|
||||
var expected, actual stats
|
||||
|
||||
type xmlFile struct {
|
||||
Path string `xml:"path"`
|
||||
Name string `xml:"filename"`
|
||||
Checksum string `xml:"md5"`
|
||||
Size int64 `xml:"size"`
|
||||
Modified listStreamTime `xml:"modified"`
|
||||
Created listStreamTime `xml:"created"`
|
||||
Path string `xml:"path"`
|
||||
Name string `xml:"filename"`
|
||||
Checksum string `xml:"md5"`
|
||||
Size int64 `xml:"size"`
|
||||
Modified api.Rfc3339Time `xml:"modified"` // Note: Liststream response includes 3 decimal milliseconds, but we ignore them since there is second precision everywhere else
|
||||
Created api.Rfc3339Time `xml:"created"`
|
||||
}
|
||||
|
||||
type xmlFolder struct {
|
||||
@@ -987,8 +968,12 @@ func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, files
|
||||
})
|
||||
}
|
||||
|
||||
// liststream paths are /mountpoint/root/path
|
||||
// so the returned paths should have /mountpoint/root/ trimmed
|
||||
// as the caller is expecting path.
|
||||
pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
|
||||
trimPathPrefix := func(p string) string {
|
||||
p = strings.TrimPrefix(p, trimPrefix)
|
||||
p = strings.TrimPrefix(p, pathPrefix)
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
return p
|
||||
}
|
||||
@@ -1071,11 +1056,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
return shouldRetry(ctx, resp, err)
|
||||
}
|
||||
|
||||
// liststream paths are /mountpoint/root/path
|
||||
// so the returned paths should have /mountpoint/root/ trimmed
|
||||
// as the caller is expecting path.
|
||||
trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
|
||||
err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
|
||||
err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
|
||||
if d.Remote() == dir {
|
||||
return nil
|
||||
}
|
||||
@@ -1210,6 +1191,45 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
|
||||
|
||||
// createOrUpdate tries to make remote file match without uploading.
// If the remote file exists, and has matching size and md5, only
// timestamps are updated. If the file does not exist or does
// not match size and md5, but matching content can be constructed
// from deduplication, the file will be updated/created. If the file
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: f.filePath(file),
|
||||
Parameters: url.Values{},
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
|
||||
opts.Parameters.Set("cphash", "true")
|
||||
|
||||
fileDate := api.JottaTime(modTime).String()
|
||||
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
|
||||
opts.ExtraHeaders["JMd5"] = md5
|
||||
opts.ExtraHeaders["JCreated"] = fileDate
|
||||
opts.ExtraHeaders["JModified"] = fileDate
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist, i.e. not matching size and md5, and not possible to make it by deduplication
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
@@ -1253,6 +1273,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
|
||||
|
||||
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
|
||||
@@ -1554,40 +1580,19 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// prepare allocate request with existing metadata but changed timestamps
|
||||
var resp *http.Response
|
||||
var options []fs.OpenOption
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "files/v1/allocate",
|
||||
Options: options,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(modTime).APIString()
|
||||
var request = api.AllocateFileRequest{
|
||||
Bytes: o.size,
|
||||
Created: fileDate,
|
||||
Modified: fileDate,
|
||||
Md5: o.md5,
|
||||
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
|
||||
}
|
||||
|
||||
// send it
|
||||
var response api.AllocateFileResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
// request check/update with existing metadata and new modtime
|
||||
// (note that if size/md5 does not match, the file content will
|
||||
// also be modified if deduplication is possible, i.e. it is
|
||||
// important to use correct/latest values)
|
||||
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
|
||||
return errors.New("metadata did not match")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// check response
|
||||
if response.State != "COMPLETED" {
|
||||
// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
|
||||
return errors.New("metadata did not match")
|
||||
}
|
||||
|
||||
// update local metadata
|
||||
o.modTime = modTime
|
||||
return nil
|
||||
@@ -1725,7 +1730,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
Options: options,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
}
|
||||
fileDate := api.Time(src.ModTime(ctx)).APIString()
|
||||
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
|
||||
|
||||
// the allocate request
|
||||
var request = api.AllocateFileRequest{
|
||||
|
||||
@@ -28,13 +28,28 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "koofr",
|
||||
Description: "Koofr",
|
||||
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your storage provider.",
|
||||
// NOTE if you add a new provider here, then add it in the
|
||||
// setProviderDefaults() function and update options accordingly
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "koofr",
|
||||
Help: "Koofr, https://app.koofr.net/",
|
||||
}, {
|
||||
Value: "digistorage",
|
||||
Help: "Digi Storage, https://storage.rcs-rds.ro/",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Any other Koofr API compatible storage service",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "The Koofr API endpoint to use.",
|
||||
Default: "https://app.koofr.net",
|
||||
Advanced: true,
|
||||
Provider: "other",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "mountid",
|
||||
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
|
||||
@@ -46,11 +61,24 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your Koofr user name.",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Provider: "koofr",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
|
||||
Provider: "digistorage",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at your service's settings page).",
|
||||
Provider: "other",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
@@ -67,6 +95,7 @@ func init() {
|
||||
|
||||
// Options represent the configuration of the Koofr backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
MountID string `config:"mountid"`
|
||||
User string `config:"user"`
|
||||
@@ -251,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
|
||||
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
|
||||
}
|
||||
|
||||
// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
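A self-contained sketch of how this defaulting plays out for an old, provider-less config (the Options struct below is a stand-in for the backend's real type, and the endpoint value is only an example):

```go
package main

import (
	"fmt"
	"strings"
)

// Options is a stand-in for the backend's option struct; only the two fields
// involved in provider defaulting are modelled here.
type Options struct {
	Provider string
	Endpoint string
}

func setProviderDefaults(opt *Options) {
	// An old config has no provider, so infer it from the endpoint.
	if opt.Provider == "" {
		if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
			opt.Provider = "koofr"
		} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
			opt.Provider = "digistorage"
		} else {
			opt.Provider = "other"
		}
	}
	// Known providers always get the canonical endpoint.
	if opt.Provider == "koofr" {
		opt.Endpoint = "https://app.koofr.net"
	} else if opt.Provider == "digistorage" {
		opt.Endpoint = "https://storage.rcs-rds.ro"
	}
}

func main() {
	opt := Options{Endpoint: "https://storage.rcs-rds.ro"}
	setProviderDefaults(&opt)
	fmt.Println(opt.Provider, opt.Endpoint) // digistorage https://storage.rcs-rds.ro
}
```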
|
||||
|
||||
// NewFs constructs a new filesystem given a root path and rclone configuration options
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setProviderDefaults(opt)
|
||||
return NewFsFromOptions(ctx, name, root, opt)
|
||||
}
|
||||
|
||||
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
|
||||
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -58,7 +58,7 @@ type UserInfoResponse struct {
|
||||
AutoProlong bool `json:"auto_prolong"`
|
||||
Basequota int64 `json:"basequota"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Expires int `json:"expires"`
|
||||
Expires int64 `json:"expires"`
|
||||
Prolong bool `json:"prolong"`
|
||||
Promocodes struct {
|
||||
} `json:"promocodes"`
|
||||
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
|
||||
FileSizeLimit int64 `json:"file_size_limit"`
|
||||
Space struct {
|
||||
BytesTotal int64 `json:"bytes_total"`
|
||||
BytesUsed int `json:"bytes_used"`
|
||||
BytesUsed int64 `json:"bytes_used"`
|
||||
Overquota bool `json:"overquota"`
|
||||
} `json:"space"`
|
||||
} `json:"cloud"`
|
||||
|
||||
@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
}
|
||||
|
||||
total := info.Body.Cloud.Space.BytesTotal
|
||||
used := int64(info.Body.Cloud.Space.BytesUsed)
|
||||
used := info.Body.Cloud.Space.BytesUsed
|
||||
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(total),
|
||||
|
||||
backend/netstorage/netstorage.go (new executable file, 1277 lines; diff suppressed because it is too large)

backend/netstorage/netstorage_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package netstorage_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/netstorage"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestnStorage:",
|
||||
NilObject: (*netstorage.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -140,6 +140,15 @@ Note that the chunks will be buffered into memory.`,
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: `ID of the root folder.
|
||||
|
||||
This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
@@ -292,6 +301,10 @@ type siteResource struct {
|
||||
type siteResponse struct {
|
||||
Sites []siteResource `json:"value"`
|
||||
}
|
||||
type deltaResponse struct {
|
||||
DeltaLink string `json:"@odata.deltaLink"`
|
||||
Value []api.Item `json:"value"`
|
||||
}
|
||||
|
||||
// Get the region and graphURL from the config
|
||||
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
|
||||
@@ -547,6 +560,7 @@ type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
DisableSitePermission bool `config:"disable_site_permission"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
@@ -639,6 +653,12 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
retry := false
|
||||
if resp != nil {
|
||||
switch resp.StatusCode {
|
||||
case 400:
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
}
|
||||
case 401:
|
||||
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
retry = true
|
||||
@@ -852,15 +872,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
})
|
||||
|
||||
// Get rootID
|
||||
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root: %w", err)
|
||||
var rootID = opt.RootFolderID
|
||||
if rootID == "" {
|
||||
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root: %w", err)
|
||||
}
|
||||
rootID = rootInfo.GetID()
|
||||
}
|
||||
if rootInfo.GetID() == "" {
|
||||
if rootID == "" {
|
||||
return nil, errors.New("failed to get root: ID was empty")
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
|
||||
f.dirCache = dircache.New(root, rootID, f)
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
@@ -868,7 +892,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = tempF.dirCache.FindRoot(ctx, false)
|
||||
@@ -1482,7 +1506,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("about failed: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
q := drive.Quota
|
||||
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
|
||||
@@ -2282,6 +2306,142 @@ func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
|
||||
return canonicalDriveID
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
//
// The Onedrive implementation gives the whole hierarchy up to the top when
// an object is changed. For instance, if a/b/c is changed, this function
// will call notifyFunc with a, a/b and a/b/c.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
go func() {
|
||||
// get the StartPageToken early so all changes from now on get processed
|
||||
nextDeltaToken, err := f.changeNotifyStartPageToken(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Could not get first deltaLink: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf(f, "Next delta token is: %s", nextDeltaToken)
|
||||
|
||||
var ticker *time.Ticker
|
||||
var tickerC <-chan time.Time
|
||||
for {
|
||||
select {
|
||||
case pollInterval, ok := <-pollIntervalChan:
|
||||
if !ok {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
ticker, tickerC = nil, nil
|
||||
}
|
||||
if pollInterval != 0 {
|
||||
ticker = time.NewTicker(pollInterval)
|
||||
tickerC = ticker.C
|
||||
}
|
||||
case <-tickerC:
|
||||
fs.Debugf(f, "Checking for changes on remote")
|
||||
nextDeltaToken, err = f.changeNotifyRunner(ctx, notifyFunc, nextDeltaToken)
|
||||
if err != nil {
|
||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
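The select loop above leans on the fact that receiving from a nil channel blocks forever: clearing tickerC switches polling off until a new non-zero interval arrives on pollIntervalChan. A stand-alone sketch of that pattern, with an invented interval and a print in place of the real change check:

```go
package main

import (
	"fmt"
	"time"
)

// poll mimics the interval handling above: a nil tickerC blocks forever in
// select, so polling is disabled until a non-zero interval arrives.
func poll(intervals <-chan time.Duration) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time
	for {
		select {
		case d, ok := <-intervals:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if d != 0 {
				ticker = time.NewTicker(d)
				tickerC = ticker.C
			}
		case <-tickerC:
			fmt.Println("checking for changes") // stands in for changeNotifyRunner
		}
	}
}

func main() {
	intervals := make(chan time.Duration, 1)
	intervals <- 100 * time.Millisecond
	go poll(intervals)
	time.Sleep(350 * time.Millisecond)
	close(intervals) // closing the channel stops the poller
	time.Sleep(50 * time.Millisecond)
}
```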
|
||||
|
||||
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken string, err error) {
|
||||
delta, err := f.changeNotifyNextChange(ctx, "latest")
|
||||
parsedURL, err := url.Parse(delta.DeltaLink)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
nextDeltaToken = parsedURL.Query().Get("token")
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
|
||||
opts := f.buildDriveDeltaOpts(token)
|
||||
|
||||
_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
|
||||
rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
|
||||
|
||||
return rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + f.driveID + "/root/delta",
|
||||
Parameters: map[string][]string{"token": {token}},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
|
||||
delta, err := f.changeNotifyNextChange(ctx, deltaToken)
|
||||
parsedURL, err := url.Parse(delta.DeltaLink)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
nextDeltaToken = parsedURL.Query().Get("token")
|
||||
|
||||
for _, item := range delta.Value {
|
||||
isDriveRootFolder := item.GetParentReference().ID == ""
|
||||
if isDriveRootFolder {
|
||||
continue
|
||||
}
|
||||
|
||||
fullPath, err := getItemFullPath(&item)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Could not get item full path: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if fullPath == f.root {
|
||||
continue
|
||||
}
|
||||
|
||||
relName, insideRoot := getRelativePathInsideBase(f.root, fullPath)
|
||||
if !insideRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
if item.GetFile() != nil {
|
||||
notifyFunc(relName, fs.EntryObject)
|
||||
} else if item.GetFolder() != nil {
|
||||
notifyFunc(relName, fs.EntryDirectory)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getItemFullPath(item *api.Item) (fullPath string, err error) {
err = nil
fullPath = item.GetName()
if parent := item.GetParentReference(); parent != nil && parent.Path != "" {
pathParts := strings.SplitN(parent.Path, ":", 2)
if len(pathParts) != 2 {
err = fmt.Errorf("invalid parent path: %s", parent.Path)
return
}

if pathParts[1] != "" {
fullPath = strings.TrimPrefix(pathParts[1], "/") + "/" + fullPath
}
}
return
}
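For context, Microsoft Graph parent references carry paths of the form "/drive/root:/some/dir" (or "/drives/<id>/root:/some/dir"), and everything after the first ':' is the path relative to the drive root. A tiny illustration of the split performed above, using made-up values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values: a file "q3.xlsx" whose parentReference.path points
	// at /documents/reports inside the drive.
	parentPath := "/drive/root:/documents/reports"
	name := "q3.xlsx"

	fullPath := name
	parts := strings.SplitN(parentPath, ":", 2)
	if len(parts) == 2 && parts[1] != "" {
		fullPath = strings.TrimPrefix(parts[1], "/") + "/" + fullPath
	}
	fmt.Println(fullPath) // documents/reports/q3.xlsx
}
```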
|
||||
|
||||
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
|
||||
// returns a relative path for `target` based on `base` and a boolean `true`.
|
||||
// Otherwise returns "", false.
|
||||
|
||||
@@ -690,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
|
||||
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
|
||||
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
|
||||
var resp *http.Response
|
||||
var result api.ItemResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -906,7 +906,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("about failed: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
|
||||
@@ -1171,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
opts.Parameters.Set("filename", leaf)
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("nopartial", "1")
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
|
||||
|
||||
// Special treatment for a 0 length upload. This doesn't work
|
||||
// with PUT even with Content-Length set (by setting
|
||||
|
||||
@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CreateDir http: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
if err = info.AsErr(); err != nil {
|
||||
return nil, fmt.Errorf("CreateDir: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
|
||||
|
||||
@@ -4,16 +4,21 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/putdotio/go-putio/putio"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
func checkStatusCode(resp *http.Response, expected int) error {
|
||||
if resp.StatusCode != expected {
|
||||
return &statusCodeError{response: resp}
|
||||
func checkStatusCode(resp *http.Response, expected ...int) error {
|
||||
for _, code := range expected {
|
||||
if resp.StatusCode == code {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return &statusCodeError{response: resp}
|
||||
}
|
||||
|
||||
type statusCodeError struct {
|
||||
@@ -24,8 +29,10 @@ func (e *statusCodeError) Error() string {
|
||||
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
|
||||
}
|
||||
|
||||
// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
|
||||
// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
|
||||
func (e *statusCodeError) Temporary() bool {
|
||||
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
|
||||
return e.response.StatusCode >= 500
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
@@ -40,6 +47,16 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
delay := defaultRateLimitSleep
header := scerr.response.Header.Get("x-ratelimit-reset")
if header != "" {
if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
delay = time.Until(time.Unix(resetTime+1, 0))
}
}
return true, pacer.RetryAfterError(scerr, delay)
}
if fserrors.ShouldRetry(err) {
return true, err
}
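The x-ratelimit-reset header is a Unix timestamp, so the retry delay above is simply the time remaining until one second past it, with defaultRateLimitSleep as the fallback when the header is absent or unparseable. A stand-alone sketch of that calculation with a synthesized header value:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Synthesize a reset timestamp 30 seconds in the future, as the server
	// would send it in x-ratelimit-reset.
	header := strconv.FormatInt(time.Now().Add(30*time.Second).Unix(), 10)

	delay := 60 * time.Second // fallback, like defaultRateLimitSleep above
	if resetTime, err := strconv.ParseInt(header, 10, 64); err == nil {
		delay = time.Until(time.Unix(resetTime+1, 0))
	}
	fmt.Println("retrying after", delay.Round(time.Second))
}
```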
|
||||
|
||||
@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if resp.StatusCode != 201 {
|
||||
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
|
||||
if err := checkStatusCode(resp, 201); err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
}
|
||||
location = resp.Header.Get("location")
|
||||
if location == "" {
|
||||
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("about failed: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used
|
||||
|
||||
@@ -241,7 +241,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
|
||||
resp, err = o.fs.httpClient.Do(req)
|
||||
return shouldRetry(ctx, err)
|
||||
if err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
}
|
||||
if err := checkStatusCode(resp, 200, 206); err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
|
||||
_ = resp.Body.Close()
|
||||
|
||||
@@ -33,8 +33,9 @@ const (
|
||||
rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
defaultChunkSize = 48 * fs.Mebi
|
||||
defaultRateLimitSleep = 60 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
backend/s3/s3.go: 721 changed lines (diff suppressed because it is too large)
@@ -109,7 +109,7 @@ when the ssh-agent contains many keys.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: `Enable the use of insecure ciphers and key exchange methods.
|
||||
Help: `Enable the use of insecure ciphers and key exchange methods.
|
||||
|
||||
This enables the use of the following insecure ciphers and key exchange methods:
|
||||
|
||||
@@ -1220,7 +1220,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
}
|
||||
stdout, err := f.run(ctx, "df -k "+escapedPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("your remote may not support About: %w", err)
|
||||
return nil, fmt.Errorf("your remote may not have the required df utility: %w", err)
|
||||
}
|
||||
|
||||
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
|
||||
@@ -1311,7 +1311,7 @@ var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\
|
||||
// when sending it to a shell.
|
||||
func shellEscape(str string) string {
|
||||
safe := shellEscapeRegex.ReplaceAllString(str, `\$0`)
|
||||
return strings.Replace(safe, "\n", "'\n'", -1)
|
||||
return strings.ReplaceAll(safe, "\n", "'\n'")
|
||||
}
|
||||
|
||||
// Converts a byte array from the SSH session returned by
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
|
||||
package tardigrade
|
||||
// Package storj provides an interface to Storj decentralized object storage.
|
||||
package storj
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -31,16 +31,17 @@ const (
|
||||
)
|
||||
|
||||
var satMap = map[string]string{
|
||||
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
|
||||
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
|
||||
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
|
||||
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
|
||||
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
|
||||
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "tardigrade",
|
||||
Description: "Tardigrade Decentralized Cloud Storage",
|
||||
Name: "storj",
|
||||
Description: "Storj Decentralized Cloud Storage",
|
||||
Aliases: []string{"tardigrade"},
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
provider, _ := m.Get(fs.ConfigProvider)
|
||||
@@ -104,15 +105,15 @@ func init() {
|
||||
Name: "satellite_address",
|
||||
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
|
||||
Provider: newProvider,
|
||||
Default: "us-central-1.tardigrade.io",
|
||||
Default: "us-central-1.storj.io",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-central-1.tardigrade.io",
|
||||
Value: "us-central-1.storj.io",
|
||||
Help: "US Central 1",
|
||||
}, {
|
||||
Value: "europe-west-1.tardigrade.io",
|
||||
Value: "europe-west-1.storj.io",
|
||||
Help: "Europe West 1",
|
||||
}, {
|
||||
Value: "asia-east-1.tardigrade.io",
|
||||
Value: "asia-east-1.storj.io",
|
||||
Help: "Asia East 1",
|
||||
},
|
||||
},
|
||||
@@ -140,7 +141,7 @@ type Options struct {
|
||||
Passphrase string `config:"passphrase"`
|
||||
}
|
||||
|
||||
// Fs represents a remote to Tardigrade
|
||||
// Fs represents a remote to Storj
|
||||
type Fs struct {
|
||||
name string // the name of the remote
|
||||
root string // root of the filesystem
|
||||
@@ -158,11 +159,12 @@ var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
)
|
||||
|
||||
// NewFs creates a filesystem backed by Tardigrade.
|
||||
// NewFs creates a filesystem backed by Storj.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
|
||||
// Setup filesystem and connection to Tardigrade
|
||||
// Setup filesystem and connection to Storj
|
||||
root = norm.NFC.String(root)
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
@@ -183,24 +185,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
if f.opts.Access != "" {
|
||||
access, err = uplink.ParseAccess(f.opts.Access)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
|
||||
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
}
|
||||
|
||||
serializedAccess, err := access.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
}
|
||||
|
||||
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +234,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
if bucketName != "" && bucketPath != "" {
|
||||
_, err = project.StatBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return f, fmt.Errorf("tardigrade: bucket: %w", err)
|
||||
return f, fmt.Errorf("storj: bucket: %w", err)
|
||||
}
|
||||
|
||||
object, err := project.StatObject(ctx, bucketName, bucketPath)
|
||||
@@ -258,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// connect opens a connection to Tardigrade.
|
||||
// connect opens a connection to Storj.
|
||||
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||
fs.Debugf(f, "connecting...")
|
||||
defer fs.Debugf(f, "connected: %+v", err)
|
||||
@@ -269,7 +271,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||
|
||||
project, err = cfg.OpenProject(ctx, f.access)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tardigrade: project: %w", err)
|
||||
return nil, fmt.Errorf("storj: project: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
@@ -577,7 +579,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newObjectFromUplink(f, "", upload.Info()), nil
|
||||
return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate
|
||||
@@ -678,3 +680,43 @@ func newPrefix(prefix string) string {
|
||||
|
||||
return prefix + "/"
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Move parameters
|
||||
srcBucket, srcKey := bucket.Split(srcObj.absolute)
|
||||
dstBucket, dstKey := f.absolute(remote)
|
||||
options := uplink.MoveObjectOptions{}
|
||||
|
||||
// Do the move
|
||||
err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
// Make sure destination bucket exists
|
||||
_, err := f.project.EnsureBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
|
||||
}
|
||||
// And try again
|
||||
err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rename object failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Read the new object
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package tardigrade
|
||||
package storj
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"storj.io/uplink"
|
||||
)
|
||||
|
||||
// Object describes a Tardigrade object
|
||||
// Object describes a Storj object
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
|
||||
@@ -32,7 +32,7 @@ type Object struct {
|
||||
// Check the interfaces are satisfied.
|
||||
var _ fs.Object = &Object{}
|
||||
|
||||
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
|
||||
// newObjectFromUplink creates a new object from a Storj uplink object.
|
||||
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
|
||||
// Attempt to use the modified time from the metadata. Otherwise
|
||||
// fallback to the server time.
|
||||
@@ -1,20 +1,20 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Test Tardigrade filesystem interface
|
||||
package tardigrade_test
|
||||
// Test Storj filesystem interface
|
||||
package storj_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/tardigrade"
|
||||
"github.com/rclone/rclone/backend/storj"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestTardigrade:",
|
||||
NilObject: (*tardigrade.Object)(nil),
|
||||
RemoteName: "TestStorj:",
|
||||
NilObject: (*storj.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build plan9
|
||||
// +build plan9
|
||||
|
||||
package tardigrade
|
||||
package storj
|
||||
@@ -754,22 +754,34 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
var containers []swift.Container
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(ctx, nil)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container listing failed: %w", err)
|
||||
}
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
var total, objects int64
|
||||
for _, c := range containers {
|
||||
total += c.Bytes
|
||||
objects += c.Count
|
||||
if f.rootContainer != "" {
|
||||
var container swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
container, _, err = f.c.Container(ctx, f.rootContainer)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container info failed: %w", err)
|
||||
}
|
||||
total = container.Bytes
|
||||
objects = container.Count
|
||||
} else {
|
||||
var containers []swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(ctx, nil)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container listing failed: %w", err)
|
||||
}
|
||||
for _, c := range containers {
|
||||
total += c.Bytes
|
||||
objects += c.Count
|
||||
}
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(total), // bytes in use
|
||||
Objects: fs.NewUsageValue(objects), // objects in use
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -84,6 +85,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
err := o.Update(ctx, readers[i], src, options...)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
|
||||
if len(entries) > 1 {
|
||||
// Drain the input buffer to allow other uploads to continue
|
||||
_, _ = io.Copy(ioutil.Discard, readers[i])
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs[i] = fs.ErrorNotAFile
|
||||
|
||||
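The io.Copy(ioutil.Discard, ...) drain above matters because each upstream reads from its own pipe fed by a single source: if one upload fails and stops reading, the shared writer blocks and the remaining uploads stall. A standalone sketch of that situation (the pipe fan-out here is an assumed simplification, not the union backend's exact plumbing; assumes imports of io, io/ioutil and sync):

// fanOut copies src to every consumer. Successful consumers are expected
// to read to EOF; failed consumers stop early and must be drained.
func fanOut(src io.Reader, consume []func(io.Reader) error) []error {
	errs := make([]error, len(consume))
	readers := make([]*io.PipeReader, len(consume))
	writers := make([]io.Writer, len(consume))
	for i := range consume {
		readers[i], writers[i] = io.Pipe()
	}
	var wg sync.WaitGroup
	for i := range consume {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := consume[i](readers[i]); err != nil {
				errs[i] = err
				// A failed consumer stops reading early; keep draining its
				// pipe so the shared copy below is not blocked for everyone.
				_, _ = io.Copy(ioutil.Discard, readers[i])
			}
		}(i)
	}
	_, _ = io.Copy(io.MultiWriter(writers...), src)
	for _, w := range writers {
		_ = w.(*io.PipeWriter).Close()
	}
	wg.Wait()
	return errs
}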
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -140,22 +141,20 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return f.hashSet
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// mkdir makes the directory passed in and returns the upstreams used
|
||||
func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
|
||||
upstreams, err := f.create(ctx, dir)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
if dir != parentDir(dir) {
|
||||
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
|
||||
return err
|
||||
}
|
||||
upstreams, err = f.create(ctx, dir)
|
||||
parent := parentDir(dir)
|
||||
if dir != parent {
|
||||
upstreams, err = f.mkdir(ctx, parent)
|
||||
} else if dir == "" {
|
||||
// If the root dirs are not created then create them
|
||||
upstreams, err = f.upstreams, nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
@@ -164,7 +163,17 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
|
||||
}
|
||||
})
|
||||
return errs.Err()
|
||||
err = errs.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return upstreams, nil
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
_, err := f.mkdir(ctx, dir)
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
@@ -448,10 +457,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
|
||||
srcPath := src.Remote()
|
||||
upstreams, err := f.create(ctx, srcPath)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
upstreams, err = f.create(ctx, srcPath)
|
||||
upstreams, err = f.mkdir(ctx, parentDir(srcPath))
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -486,6 +492,10 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
|
||||
}
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
||||
if len(upstreams) > 1 {
|
||||
// Drain the input buffer to allow other uploads to continue
|
||||
_, _ = io.Copy(ioutil.Discard, readers[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
objs[i] = u.WrapObject(o)
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -20,19 +18,12 @@ import (
|
||||
)
|
||||
|
||||
// MakeTestDirs makes directories in /tmp for testing
|
||||
func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
|
||||
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
|
||||
for i := 1; i <= n; i++ {
|
||||
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
|
||||
require.NoError(t, err)
|
||||
dir := t.TempDir()
|
||||
dirs = append(dirs, dir)
|
||||
}
|
||||
clean = func() {
|
||||
for _, dir := range dirs {
|
||||
err := os.RemoveAll(dir)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
return dirs, clean
|
||||
return dirs
|
||||
}
|
||||
|
||||
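MakeTestDirs now relies on t.TempDir, available in the standard library since Go 1.15: the testing framework removes the directory (and fails the test if removal fails) once the test and its subtests complete, so the explicit cleanup closure is no longer needed. A minimal sketch of the pattern (the test name and file are illustrative; assumes imports of io/ioutil, path/filepath and testing):

func TestTempDirExample(t *testing.T) {
	dir := t.TempDir() // unique per call, removed automatically after the test
	err := ioutil.WriteFile(filepath.Join(dir, "file.txt"), []byte("hello"), 0666)
	if err != nil {
		t.Fatal(err)
	}
}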
func (f *Fs) TestInternalReadOnly(t *testing.T) {
|
||||
@@ -95,8 +86,7 @@ func TestMoveCopy(t *testing.T) {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
ctx := context.Background()
|
||||
dirs, clean := MakeTestDirs(t, 1)
|
||||
defer clean()
|
||||
dirs := MakeTestDirs(t, 1)
|
||||
fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
|
||||
f, err := fs.NewFs(ctx, fsString)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -27,8 +27,7 @@ func TestStandard(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnion"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -49,8 +48,7 @@ func TestRO(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
|
||||
name := "TestUnionRO"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -71,8 +69,7 @@ func TestNC(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
|
||||
name := "TestUnionNC"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -93,8 +90,7 @@ func TestPolicy1(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy1"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -115,8 +111,7 @@ func TestPolicy2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -137,8 +132,7 @@ func TestPolicy3(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
|
||||
@@ -124,6 +124,22 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
|
||||
`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "base_path",
|
||||
Help: `Base path of expected replies
|
||||
|
||||
Normally WebDAV servers return the files they are listing under the
|
||||
URL path as specified above. However, some WebDAV servers return files
|
||||
with URLs that are not under the endpoint URL. This causes rclone to
|
||||
get confused and return errors like
|
||||
|
||||
Item with unknown path received: "/remote.php/webdav/folder1/", "/elsewhere/remote.php/webdav/folder1/"
|
||||
|
||||
If that is the case, then set "base_path" to the path
|
||||
specified in the error message up to the first item, in the above
|
||||
example "/elsewhere/remote.php/webdav/".
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
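A standalone sketch of the path handling this option feeds into (the listAll change further down trims the configured base path instead of the endpoint path); the values are the ones from the help text above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	basePath := "/elsewhere/remote.php/webdav/"          // from base_path
	replyPath := "/elsewhere/remote.php/webdav/folder1/" // path returned by the server
	if !strings.HasPrefix(replyPath, basePath) {
		fmt.Println("item with unknown path received") // entry would be skipped
		return
	}
	fmt.Println(replyPath[len(basePath):]) // prints "folder1/"
}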
@@ -138,6 +154,7 @@ type Options struct {
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
Headers fs.CommaSepList `config:"headers"`
|
||||
BasePath string `config:"base_path"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -454,7 +471,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.srv.SetHeader("Referer", u.String())
|
||||
if !f.findHeader(opt.Headers, "Referer") {
|
||||
f.srv.SetHeader("Referer", u.String())
|
||||
}
|
||||
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root is actually an existing file
|
||||
@@ -517,6 +536,17 @@ func (f *Fs) addHeaders(headers fs.CommaSepList) {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the header was configured
|
||||
func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
|
||||
for i := 0; i < len(headers); i += 2 {
|
||||
key := f.opt.Headers[i]
|
||||
if strings.EqualFold(key, find) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
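findHeader is what keeps the Referer default above from overriding a user-supplied header. The headers option stores key/value pairs flattened into one list, so the scan steps by two and compares keys case-insensitively. A standalone sketch of the same lookup over a plain []string (the header values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// hasHeader mirrors findHeader above: keys sit at even indexes, values
// at odd ones, and key matching is case-insensitive.
func hasHeader(headers []string, find string) bool {
	for i := 0; i < len(headers); i += 2 {
		if strings.EqualFold(headers[i], find) {
			return true
		}
	}
	return false
}

func main() {
	headers := []string{"Cookie", "name=value", "Referer", "https://example.com/"}
	fmt.Println(hasHeader(headers, "referer"))       // true
	fmt.Println(hasHeader(headers, "Authorization")) // false
}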
// fetch the bearer token and set it if successful
|
||||
func (f *Fs) fetchAndSetBearerToken() error {
|
||||
if f.opt.BearerTokenCommand == "" {
|
||||
@@ -680,6 +710,10 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("couldn't join URL: %w", err)
|
||||
}
|
||||
basePath := baseURL.Path
|
||||
if f.opt.BasePath != "" {
|
||||
basePath = f.opt.BasePath
|
||||
}
|
||||
for i := range result.Responses {
|
||||
item := &result.Responses[i]
|
||||
isDir := itemIsDir(item)
|
||||
@@ -694,11 +728,11 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
if isDir {
|
||||
u.Path = addSlash(u.Path)
|
||||
}
|
||||
if !strings.HasPrefix(u.Path, baseURL.Path) {
|
||||
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
|
||||
if !strings.HasPrefix(u.Path, basePath) {
|
||||
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, basePath)
|
||||
continue
|
||||
}
|
||||
subPath := u.Path[len(baseURL.Path):]
|
||||
subPath := u.Path[len(basePath):]
|
||||
if f.opt.Enc != encoder.EncodeZero {
|
||||
subPath = f.opt.Enc.ToStandardPath(subPath)
|
||||
}
|
||||
@@ -1148,7 +1182,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("about call failed: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
usage := &fs.Usage{}
|
||||
if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/winfsp/cgofuse.git
|
||||
docker images
|
||||
docker push rclone/xgo-cgofuse
|
||||
|
||||
@@ -52,6 +52,7 @@ var (
|
||||
var osarches = []string{
|
||||
"windows/386",
|
||||
"windows/amd64",
|
||||
"windows/arm64",
|
||||
"darwin/amd64",
|
||||
"darwin/arm64",
|
||||
"linux/386",
|
||||
@@ -85,6 +86,13 @@ var archFlags = map[string][]string{
|
||||
"arm-v7": {"GOARM=7"},
|
||||
}
|
||||
|
||||
// Map Go architectures to NFPM architectures
|
||||
// Any missing are passed straight through
|
||||
var goarchToNfpm = map[string]string{
|
||||
"arm": "arm6",
|
||||
"arm-v7": "arm7",
|
||||
}
|
||||
|
||||
// runEnv - run a shell command with env
|
||||
func runEnv(args, env []string) error {
|
||||
if *debug {
|
||||
@@ -165,13 +173,17 @@ func buildZip(dir string) string {
|
||||
func buildDebAndRpm(dir, version, goarch string) []string {
|
||||
// Make internal version number acceptable to .deb and .rpm
|
||||
pkgVersion := version[1:]
|
||||
pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
|
||||
pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
|
||||
pkgVersion = strings.ReplaceAll(pkgVersion, "β", "-beta")
|
||||
pkgVersion = strings.ReplaceAll(pkgVersion, "-", ".")
|
||||
nfpmArch, ok := goarchToNfpm[goarch]
|
||||
if !ok {
|
||||
nfpmArch = goarch
|
||||
}
|
||||
|
||||
// Make nfpm.yaml from the template
|
||||
substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
|
||||
"Version": pkgVersion,
|
||||
"Arch": goarch,
|
||||
"Arch": nfpmArch,
|
||||
})
|
||||
|
||||
// build them
|
||||
@@ -253,9 +265,12 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
|
||||
"-o",
|
||||
sysoPath,
|
||||
}
|
||||
if goarch == "amd64" {
|
||||
if strings.Contains(goarch, "64") {
|
||||
args = append(args, "-64") // Make the syso a 64-bit coff file
|
||||
}
|
||||
if strings.Contains(goarch, "arm") {
|
||||
args = append(args, "-arm") // Make the syso an arm binary
|
||||
}
|
||||
args = append(args, jsonPath)
|
||||
err = runEnv(args, nil)
|
||||
if err != nil {
|
||||
@@ -377,7 +392,7 @@ func compileArch(version, goos, goarch, dir string) bool {
|
||||
artifacts := []string{buildZip(dir)}
|
||||
// build a .deb and .rpm if appropriate
|
||||
if goos == "linux" {
|
||||
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
|
||||
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
|
||||
}
|
||||
if *copyAs != "" {
|
||||
for _, artifact := range artifacts {
|
||||
|
||||
@@ -24,6 +24,7 @@ docs = [
|
||||
"overview.md",
|
||||
"flags.md",
|
||||
"docker.md",
|
||||
"bisync.md",
|
||||
|
||||
# Keep these alphabetical by full name
|
||||
"fichier.md",
|
||||
@@ -47,11 +48,13 @@ docs = [
|
||||
"hdfs.md",
|
||||
"http.md",
|
||||
"hubic.md",
|
||||
"internetarchive.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
"mailru.md",
|
||||
"mega.md",
|
||||
"memory.md",
|
||||
"netstorage.md",
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
"opendrive.md",
|
||||
@@ -63,8 +66,9 @@ docs = [
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
"sftp.md",
|
||||
"storj.md",
|
||||
"sugarsync.md",
|
||||
"tardigrade.md",
|
||||
"tardigrade.md", # stub only to redirect to storj.md
|
||||
"uptobox.md",
|
||||
"union.md",
|
||||
"webdav.md",
|
||||
|
||||
@@ -102,7 +102,7 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
|
||||
}
|
||||
u, err := doAbout(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("About call failed: %w", err)
|
||||
return fmt.Errorf("about call failed: %w", err)
|
||||
}
|
||||
if u == nil {
|
||||
return errors.New("nil usage returned")
|
||||
|
||||
@@ -489,7 +489,7 @@ func resolveExitCode(err error) {
|
||||
os.Exit(exitcode.TransferExceeded)
|
||||
case fserrors.ShouldRetry(err):
|
||||
os.Exit(exitcode.RetryError)
|
||||
case fserrors.IsNoRetryError(err):
|
||||
case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err):
|
||||
os.Exit(exitcode.NoRetryError)
|
||||
case fserrors.IsFatalError(err):
|
||||
os.Exit(exitcode.FatalError)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
// +build linux,cgo darwin,cgo freebsd,cgo windows
|
||||
|
||||
package cmount
|
||||
|
||||
@@ -13,12 +12,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/winfsp/cgofuse/fuse"
|
||||
)
|
||||
|
||||
const fhUnset = ^uint64(0)
|
||||
|
||||
@@ -2,10 +2,9 @@
|
||||
//
|
||||
// This uses the cgo based cgofuse library
|
||||
|
||||
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
// +build linux,cgo darwin,cgo freebsd,cgo windows
|
||||
|
||||
package cmount
|
||||
|
||||
@@ -18,12 +17,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/buildinfo"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/winfsp/cgofuse/fuse"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -168,7 +167,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
|
||||
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
|
||||
|
||||
// Create options
|
||||
options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
|
||||
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
|
||||
fs.Debugf(f, "Mounting with options: %q", options)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
//go:build cmount && cgo && (linux || darwin || freebsd || windows) && (!race || !windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)
|
||||
// +build cmount
|
||||
// +build cgo
|
||||
// +build linux darwin freebsd windows
|
||||
// +build linux,cgo darwin,cgo freebsd,cgo windows
|
||||
// +build !race !windows
|
||||
|
||||
// FIXME this doesn't work with the race detector under Windows either
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
// Build for cmount for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build (!linux && !darwin && !freebsd && !windows) || !brew || !cgo || !cmount
|
||||
// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
|
||||
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
|
||||
// +build !linux !cgo !cmount
|
||||
// +build !darwin !cgo !cmount
|
||||
// +build !freebsd !cgo !cmount
|
||||
// +build !windows !cmount
|
||||
|
||||
package cmount
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//go:build cmount && cgo && windows
|
||||
// +build cmount,cgo,windows
|
||||
//go:build cmount && windows
|
||||
// +build cmount,windows
|
||||
|
||||
package cmount
|
||||
|
||||
|
||||
@@ -79,7 +79,7 @@ rclone.org website.`,
|
||||
var description = map[string]string{}
|
||||
var addDescription func(root *cobra.Command)
|
||||
addDescription = func(root *cobra.Command) {
|
||||
name := strings.Replace(root.CommandPath(), " ", "_", -1) + ".md"
|
||||
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
|
||||
description[name] = root.Short
|
||||
for _, c := range root.Commands() {
|
||||
addDescription(c)
|
||||
@@ -93,11 +93,11 @@ rclone.org website.`,
|
||||
base := strings.TrimSuffix(name, path.Ext(name))
|
||||
data := frontmatter{
|
||||
Date: now,
|
||||
Title: strings.Replace(base, "_", " ", -1),
|
||||
Title: strings.ReplaceAll(base, "_", " "),
|
||||
Description: description[name],
|
||||
Slug: base,
|
||||
URL: "/commands/" + strings.ToLower(base) + "/",
|
||||
Source: strings.Replace(strings.Replace(base, "rclone", "cmd", -1), "_", "/", -1) + "/",
|
||||
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := frontmatterTemplate.Execute(&buf, data)
|
||||
|
||||
cmd/help.go
@@ -329,12 +329,29 @@ func showBackend(name string) {
|
||||
if opt.IsPassword {
|
||||
fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
|
||||
}
|
||||
fmt.Printf("Properties:\n\n")
|
||||
fmt.Printf("- Config: %s\n", opt.Name)
|
||||
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
|
||||
if opt.Provider != "" {
|
||||
fmt.Printf("- Provider: %s\n", opt.Provider)
|
||||
}
|
||||
fmt.Printf("- Type: %s\n", opt.Type())
|
||||
fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
|
||||
defaultValue := opt.GetValue()
|
||||
// Default value and Required are related: Required means option must
|
||||
// have a value, but if there is a default then a value does not have
|
||||
// to be explicitly set and then Required makes no difference.
|
||||
if defaultValue != "" {
|
||||
fmt.Printf("- Default: %s\n", quoteString(defaultValue))
|
||||
} else {
|
||||
fmt.Printf("- Required: %v\n", opt.Required)
|
||||
}
|
||||
// List examples / possible choices
|
||||
if len(opt.Examples) > 0 {
|
||||
fmt.Printf("- Examples:\n")
|
||||
if opt.Exclusive {
|
||||
fmt.Printf("- Choices:\n")
|
||||
} else {
|
||||
fmt.Printf("- Examples:\n")
|
||||
}
|
||||
for _, ex := range opt.Examples {
|
||||
fmt.Printf(" - %s\n", quoteString(ex.Value))
|
||||
for _, line := range strings.Split(ex.Help, "\n") {
|
||||
|
||||
@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
|
||||
|
||||
f := VFS.Fs()
|
||||
fs.Debugf(f, "Mounting on %q", mountpoint)
|
||||
c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
|
||||
c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -25,11 +25,10 @@ func init() {
|
||||
// mountOptions configures the options from the command line flags
|
||||
//
|
||||
// man mount.fuse for more info and note the -o flag for other options
|
||||
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
|
||||
device := f.Name() + ":" + f.Root()
|
||||
func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
|
||||
mountOpts = &fuse.MountOptions{
|
||||
AllowOther: fsys.opt.AllowOther,
|
||||
FsName: device,
|
||||
FsName: opt.DeviceName,
|
||||
Name: "rclone",
|
||||
DisableXAttrs: true,
|
||||
Debug: fsys.opt.DebugFUSE,
|
||||
@@ -120,7 +119,7 @@ func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
opts = append(opts,
|
||||
// VolumeName sets the volume name shown in Finder.
|
||||
fmt.Sprintf("volname=%s", device),
|
||||
fmt.Sprintf("volname=%s", opt.VolumeName),
|
||||
|
||||
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
|
||||
// prefix "com.apple.". This disables persistent Finder state and
|
||||
@@ -167,7 +166,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
|
||||
//mOpts.Debug = mountlib.DebugFUSE
|
||||
|
||||
//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
|
||||
mountOpts := mountOptions(fsys, f)
|
||||
mountOpts := mountOptions(fsys, f, opt)
|
||||
|
||||
// FIXME fill out
|
||||
opts := fusefs.Options{
|
||||
|
||||
@@ -65,10 +65,10 @@ at all, then 1 PiB is set as both the total and the free size.
|
||||
To run rclone @ on Windows, you will need to
|
||||
download and install [WinFsp](http://www.secfs.net/winfsp/).
|
||||
|
||||
[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
|
||||
[WinFsp](https://github.com/winfsp/winfsp) is an open-source
|
||||
Windows File System Proxy which makes it easy to write user space file
|
||||
systems for Windows. It provides a FUSE emulation layer which rclone
|
||||
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
|
||||
uses in combination with [cgofuse](https://github.com/winfsp/cgofuse).
|
||||
Both of these packages are by Bill Zissimopoulos who was very helpful
|
||||
during the implementation of rclone @ for Windows.
|
||||
|
||||
@@ -218,7 +218,7 @@ from Microsoft's Sysinternals suite, which has option |-s| to start
|
||||
processes as the SYSTEM account. Another alternative is to run the mount
|
||||
command from a Windows Scheduled Task, or a Windows Service, configured
|
||||
to run as the SYSTEM account. A third alternative is to use the
|
||||
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture)).
|
||||
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
|
||||
Note that when running rclone as another user, it will not use
|
||||
the configuration file from your profile unless you tell it to
|
||||
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
|
||||
|
||||
@@ -40,6 +40,7 @@ type Options struct {
|
||||
ExtraOptions []string
|
||||
ExtraFlags []string
|
||||
AttrTimeout time.Duration // how long the kernel caches attribute for
|
||||
DeviceName string
|
||||
VolumeName string
|
||||
NoAppleDouble bool
|
||||
NoAppleXattr bool
|
||||
@@ -77,6 +78,17 @@ type MountPoint struct {
|
||||
ErrChan <-chan error
|
||||
}
|
||||
|
||||
// NewMountPoint makes a new mounting structure
|
||||
func NewMountPoint(mount MountFn, mountPoint string, f fs.Fs, mountOpt *Options, vfsOpt *vfscommon.Options) *MountPoint {
|
||||
return &MountPoint{
|
||||
MountFn: mount,
|
||||
MountPoint: mountPoint,
|
||||
Fs: f,
|
||||
MountOpt: *mountOpt,
|
||||
VFSOpt: *vfsOpt,
|
||||
}
|
||||
}
|
||||
|
||||
// Global constants
|
||||
const (
|
||||
MaxLeafSize = 1024 // don't pass file names longer than this
|
||||
@@ -125,6 +137,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
|
||||
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
|
||||
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
|
||||
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
|
||||
// Windows and OSX
|
||||
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
|
||||
// OSX only
|
||||
@@ -165,14 +178,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
|
||||
defer cmd.StartStats()()
|
||||
}
|
||||
|
||||
mnt := &MountPoint{
|
||||
MountFn: mount,
|
||||
MountPoint: args[1],
|
||||
Fs: cmd.NewFsDir(args),
|
||||
MountOpt: Opt,
|
||||
VFSOpt: vfsflags.Opt,
|
||||
}
|
||||
|
||||
mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfsflags.Opt)
|
||||
daemon, err := mnt.Mount()
|
||||
|
||||
// Wait for foreground mount, if any...
|
||||
@@ -235,6 +241,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
|
||||
return nil, err
|
||||
}
|
||||
m.SetVolumeName(m.MountOpt.VolumeName)
|
||||
m.SetDeviceName(m.MountOpt.DeviceName)
|
||||
|
||||
// Start background task if --daemon is specified
|
||||
if m.MountOpt.Daemon {
|
||||
@@ -250,6 +257,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
|
||||
}
|
||||
m.MountedOn = time.Now()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
)
|
||||
|
||||
@@ -117,23 +116,15 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
VFS := vfs.New(fdst, &vfsOpt)
|
||||
_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
|
||||
mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
|
||||
_, err = mnt.Mount()
|
||||
if err != nil {
|
||||
log.Printf("mount FAILED: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add mount to list if mount point was successfully created
|
||||
liveMounts[mountPoint] = &MountPoint{
|
||||
MountPoint: mountPoint,
|
||||
MountedOn: time.Now(),
|
||||
MountFn: mountFn,
|
||||
UnmountFn: unmountFn,
|
||||
MountOpt: mountOpt,
|
||||
VFSOpt: vfsOpt,
|
||||
Fs: fdst,
|
||||
}
|
||||
liveMounts[mountPoint] = mnt
|
||||
|
||||
fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
|
||||
return nil, nil
|
||||
|
||||
@@ -35,19 +35,14 @@ func TestRc(t *testing.T) {
|
||||
getMountTypes := rc.Calls.Get("mount/types")
|
||||
assert.NotNil(t, getMountTypes)
|
||||
|
||||
localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(localDir) }()
|
||||
err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
|
||||
localDir := t.TempDir()
|
||||
err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
|
||||
require.NoError(t, err)
|
||||
|
||||
mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
|
||||
require.NoError(t, err)
|
||||
mountPoint := t.TempDir()
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows requires the mount point not to exist
|
||||
require.NoError(t, os.RemoveAll(mountPoint))
|
||||
} else {
|
||||
defer func() { _ = os.RemoveAll(mountPoint) }()
|
||||
}
|
||||
|
||||
out, err := getMountTypes.Fn(ctx, nil)
|
||||
|
||||
@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
|
||||
// SetVolumeName with sensible default
|
||||
func (m *MountPoint) SetVolumeName(vol string) {
|
||||
if vol == "" {
|
||||
vol = m.Fs.Name() + ":" + m.Fs.Root()
|
||||
vol = fs.ConfigString(m.Fs)
|
||||
}
|
||||
m.MountOpt.SetVolumeName(vol)
|
||||
}
|
||||
@@ -102,3 +102,11 @@ func (o *Options) SetVolumeName(vol string) {
|
||||
}
|
||||
o.VolumeName = vol
|
||||
}
|
||||
|
||||
// SetDeviceName with sensible default
|
||||
func (m *MountPoint) SetDeviceName(dev string) {
|
||||
if dev == "" {
|
||||
dev = fs.ConfigString(m.Fs)
|
||||
}
|
||||
m.MountOpt.DeviceName = dev
|
||||
}
|
||||
|
||||
@@ -42,15 +42,31 @@ builds an in memory representation. rclone ncdu can be used during
|
||||
this scanning phase and you will see it building up the directory
|
||||
structure as it goes along.
|
||||
|
||||
Here are the keys - press '?' to toggle the help on and off
|
||||
You can interact with the user interface using key presses;
|
||||
press '?' to toggle the help on and off. The supported keys are:
|
||||
|
||||
` + strings.Join(helpText()[1:], "\n ") + `
|
||||
|
||||
Listed files/directories may be prefixed by a one-character flag,
|
||||
some of them combined with a description in brackets at the end of the line.
|
||||
These flags have the following meaning:
|
||||
|
||||
e means this is an empty directory, i.e. contains no files (but
|
||||
may contain empty subdirectories)
|
||||
~ means this is a directory where some of the files (possibly in
|
||||
subdirectories) have unknown size, and therefore the directory
|
||||
size may be underestimated (and average size inaccurate, as it
|
||||
is the average of the files with known sizes).
|
||||
. means an error occurred while reading a subdirectory, and
|
||||
therefore the directory size may be underestimated (and average
|
||||
size inaccurate)
|
||||
! means an error occurred while reading this directory
|
||||
|
||||
This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
|
||||
rclone remotes. It is missing lots of features at the moment
|
||||
but is useful as it stands.
|
||||
|
||||
Note that it might take some time to delete big files/folders. The
|
||||
Note that it might take some time to delete big files/directories. The
|
||||
UI won't respond in the meantime since the deletion is done synchronously.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
@@ -283,9 +299,9 @@ func (u *UI) biggestEntry() (biggest int64) {
|
||||
return
|
||||
}
|
||||
for i := range u.entries {
|
||||
size, _, _, _, _, _ := u.d.AttrI(u.sortPerm[i])
|
||||
if size > biggest {
|
||||
biggest = size
|
||||
attrs, _ := u.d.AttrI(u.sortPerm[i])
|
||||
if attrs.Size > biggest {
|
||||
biggest = attrs.Size
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -297,8 +313,8 @@ func (u *UI) hasEmptyDir() bool {
|
||||
return false
|
||||
}
|
||||
for i := range u.entries {
|
||||
_, count, isDir, _, _, _ := u.d.AttrI(u.sortPerm[i])
|
||||
if isDir && count == 0 {
|
||||
attrs, _ := u.d.AttrI(u.sortPerm[i])
|
||||
if attrs.IsDir && attrs.Count == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -343,9 +359,9 @@ func (u *UI) Draw() error {
|
||||
if y >= h-1 {
|
||||
break
|
||||
}
|
||||
size, count, isDir, readable, entriesHaveErrors, err := u.d.AttrI(u.sortPerm[n])
|
||||
attrs, err := u.d.AttrI(u.sortPerm[n])
|
||||
fg := termbox.ColorWhite
|
||||
if entriesHaveErrors {
|
||||
if attrs.EntriesHaveErrors {
|
||||
fg = termbox.ColorYellow
|
||||
}
|
||||
if err != nil {
|
||||
@@ -356,15 +372,19 @@ func (u *UI) Draw() error {
|
||||
fg, bg = bg, fg
|
||||
}
|
||||
mark := ' '
|
||||
if isDir {
|
||||
if attrs.IsDir {
|
||||
mark = '/'
|
||||
}
|
||||
fileFlag := ' '
|
||||
message := ""
|
||||
if !readable {
|
||||
if !attrs.Readable {
|
||||
message = " [not read yet]"
|
||||
}
|
||||
if entriesHaveErrors {
|
||||
if attrs.CountUnknownSize > 0 {
|
||||
message = fmt.Sprintf(" [%d of %d files have unknown size, size may be underestimated]", attrs.CountUnknownSize, attrs.Count)
|
||||
fileFlag = '~'
|
||||
}
|
||||
if attrs.EntriesHaveErrors {
|
||||
message = " [some subdirectories could not be read, size may be underestimated]"
|
||||
fileFlag = '.'
|
||||
}
|
||||
@@ -374,32 +394,29 @@ func (u *UI) Draw() error {
|
||||
}
|
||||
extras := ""
|
||||
if u.showCounts {
|
||||
ss := operations.CountStringField(count, u.humanReadable, 9) + " "
|
||||
if count > 0 {
|
||||
ss := operations.CountStringField(attrs.Count, u.humanReadable, 9) + " "
|
||||
if attrs.Count > 0 {
|
||||
extras += ss
|
||||
} else {
|
||||
extras += strings.Repeat(" ", len(ss))
|
||||
}
|
||||
}
|
||||
var averageSize float64
|
||||
if count > 0 {
|
||||
averageSize = float64(size) / float64(count)
|
||||
}
|
||||
if u.showDirAverageSize {
|
||||
ss := operations.SizeStringField(int64(averageSize), u.humanReadable, 9) + " "
|
||||
if averageSize > 0 {
|
||||
avg := attrs.AverageSize()
|
||||
ss := operations.SizeStringField(int64(avg), u.humanReadable, 9) + " "
|
||||
if avg > 0 {
|
||||
extras += ss
|
||||
} else {
|
||||
extras += strings.Repeat(" ", len(ss))
|
||||
}
|
||||
}
|
||||
if showEmptyDir {
|
||||
if isDir && count == 0 && fileFlag == ' ' {
|
||||
if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
|
||||
fileFlag = 'e'
|
||||
}
|
||||
}
|
||||
if u.showGraph {
|
||||
bars := (size + perBar/2 - 1) / perBar
|
||||
bars := (attrs.Size + perBar/2 - 1) / perBar
|
||||
// clip if necessary - only happens during startup
|
||||
if bars > 10 {
|
||||
bars = 10
|
||||
@@ -408,7 +425,7 @@ func (u *UI) Draw() error {
|
||||
}
|
||||
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
|
||||
}
|
||||
Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
|
||||
Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
|
||||
y++
|
||||
}
|
||||
}
|
||||
@@ -559,14 +576,14 @@ type ncduSort struct {
|
||||
// Less is part of sort.Interface.
|
||||
func (ds *ncduSort) Less(i, j int) bool {
|
||||
var iAvgSize, jAvgSize float64
|
||||
isize, icount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[i])
|
||||
jsize, jcount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[j])
|
||||
iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
|
||||
jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
|
||||
iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
|
||||
if icount > 0 {
|
||||
iAvgSize = float64(isize / icount)
|
||||
if iattrs.Count > 0 {
|
||||
iAvgSize = iattrs.AverageSize()
|
||||
}
|
||||
if jcount > 0 {
|
||||
jAvgSize = float64(jsize / jcount)
|
||||
if jattrs.Count > 0 {
|
||||
jAvgSize = jattrs.AverageSize()
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -575,33 +592,33 @@ func (ds *ncduSort) Less(i, j int) bool {
|
||||
case ds.u.sortByName > 0:
|
||||
break
|
||||
case ds.u.sortBySize < 0:
|
||||
if isize != jsize {
|
||||
return isize < jsize
|
||||
if iattrs.Size != jattrs.Size {
|
||||
return iattrs.Size < jattrs.Size
|
||||
}
|
||||
case ds.u.sortBySize > 0:
|
||||
if isize != jsize {
|
||||
return isize > jsize
|
||||
if iattrs.Size != jattrs.Size {
|
||||
return iattrs.Size > jattrs.Size
|
||||
}
|
||||
case ds.u.sortByCount < 0:
|
||||
if icount != jcount {
|
||||
return icount < jcount
|
||||
if iattrs.Count != jattrs.Count {
|
||||
return iattrs.Count < jattrs.Count
|
||||
}
|
||||
case ds.u.sortByCount > 0:
|
||||
if icount != jcount {
|
||||
return icount > jcount
|
||||
if iattrs.Count != jattrs.Count {
|
||||
return iattrs.Count > jattrs.Count
|
||||
}
|
||||
case ds.u.sortByAverageSize < 0:
|
||||
if iAvgSize != jAvgSize {
|
||||
return iAvgSize < jAvgSize
|
||||
}
|
||||
// if avgSize is equal, sort by size
|
||||
return isize < jsize
|
||||
return iattrs.Size < jattrs.Size
|
||||
case ds.u.sortByAverageSize > 0:
|
||||
if iAvgSize != jAvgSize {
|
||||
return iAvgSize > jAvgSize
|
||||
}
|
||||
// if avgSize is equal, sort by size
|
||||
return isize > jsize
|
||||
return iattrs.Size > jattrs.Size
|
||||
}
|
||||
// if everything equal, sort by name
|
||||
return iname < jname
|
||||
|
||||
@@ -16,14 +16,42 @@ type Dir struct {
|
||||
parent *Dir
|
||||
path string
|
||||
mu sync.Mutex
|
||||
count int64
|
||||
size int64
|
||||
count int64
|
||||
countUnknownSize int64
|
||||
entries fs.DirEntries
|
||||
dirs map[string]*Dir
|
||||
readError error
|
||||
entriesHaveErrors bool
|
||||
}
|
||||
|
||||
// Attrs contains accumulated properties for a directory entry
|
||||
//
|
||||
// Files with unknown size are counted separately but also included
|
||||
// in the total count. They are not included in the size, i.e. treated
|
||||
// as empty files, which means the size may be underestimated.
|
||||
type Attrs struct {
|
||||
Size int64
|
||||
Count int64
|
||||
CountUnknownSize int64
|
||||
IsDir bool
|
||||
Readable bool
|
||||
EntriesHaveErrors bool
|
||||
}
|
||||
|
||||
// AverageSize calculates average size of files in directory
|
||||
//
|
||||
// If there are files with unknown size, this returns the average over
|
||||
// files with known sizes, which means it may be under- or
|
||||
// overestimated.
|
||||
func (a *Attrs) AverageSize() float64 {
|
||||
countKnownSize := a.Count - a.CountUnknownSize
|
||||
if countKnownSize > 0 {
|
||||
return float64(a.Size) / float64(countKnownSize)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
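A small example test for the accumulated attributes, assuming it lives in the same package and fmt is imported; the numbers are made up: five entries totalling 300 bytes of known size, two of them with unknown size, so the average is taken over the three files with known sizes.

func ExampleAttrs_AverageSize() {
	a := Attrs{Size: 300, Count: 5, CountUnknownSize: 2}
	fmt.Println(a.AverageSize())
	// Output: 100
}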
// Parent returns the directory above this one
|
||||
func (d *Dir) Parent() *Dir {
|
||||
// no locking needed since these are write once in newDir()
|
||||
@@ -49,7 +77,13 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
|
||||
for _, entry := range entries {
|
||||
if o, ok := entry.(fs.Object); ok {
|
||||
d.count++
|
||||
d.size += o.Size()
|
||||
size := o.Size()
|
||||
if size < 0 {
|
||||
// Some backends may return -1 because size of object is not known
|
||||
d.countUnknownSize++
|
||||
} else {
|
||||
d.size += size
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set my directory entry in parent
|
||||
@@ -62,8 +96,9 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
|
||||
// Accumulate counts in parents
|
||||
for ; parent != nil; parent = parent.parent {
|
||||
parent.mu.Lock()
|
||||
parent.count += d.count
|
||||
parent.size += d.size
|
||||
parent.count += d.count
|
||||
parent.countUnknownSize += d.countUnknownSize
|
||||
if d.readError != nil {
|
||||
parent.entriesHaveErrors = true
|
||||
}
|
||||
@@ -91,17 +126,24 @@ func (d *Dir) Remove(i int) {
|
||||
// Call with d.mu held
|
||||
func (d *Dir) remove(i int) {
|
||||
size := d.entries[i].Size()
|
||||
countUnknownSize := int64(0)
|
||||
if size < 0 {
|
||||
size = 0
|
||||
countUnknownSize = 1
|
||||
}
|
||||
count := int64(1)
|
||||
|
||||
subDir, ok := d.getDir(i)
|
||||
if ok {
|
||||
size = subDir.size
|
||||
count = subDir.count
|
||||
countUnknownSize = subDir.countUnknownSize
|
||||
delete(d.dirs, path.Base(subDir.path))
|
||||
}
|
||||
|
||||
d.size -= size
|
||||
d.count -= count
|
||||
d.countUnknownSize -= countUnknownSize
|
||||
d.entries = append(d.entries[:i], d.entries[i+1:]...)
|
||||
|
||||
dir := d
|
||||
@@ -111,6 +153,7 @@ func (d *Dir) remove(i int) {
|
||||
parent.dirs[path.Base(dir.path)] = dir
|
||||
parent.size -= size
|
||||
parent.count -= count
|
||||
parent.countUnknownSize -= countUnknownSize
|
||||
dir = parent
|
||||
parent.mu.Unlock()
|
||||
}
|
||||
@@ -151,19 +194,19 @@ func (d *Dir) Attr() (size int64, count int64) {
|
||||
}
|
||||
|
||||
// AttrI returns the size, count and flags for the i-th directory entry
|
||||
func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool, entriesHaveErrors bool, err error) {
|
||||
func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
subDir, isDir := d.getDir(i)
|
||||
|
||||
if !isDir {
|
||||
return d.entries[i].Size(), 0, false, true, d.entriesHaveErrors, d.readError
|
||||
return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
|
||||
}
|
||||
if subDir == nil {
|
||||
return 0, 0, true, false, false, nil
|
||||
return Attrs{0, 0, 0, true, false, false}, nil
|
||||
}
|
||||
size, count = subDir.Attr()
|
||||
return size, count, true, true, subDir.entriesHaveErrors, subDir.readError
|
||||
size, count := subDir.Attr()
|
||||
return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
|
||||
}
|
||||
|
||||
// Scan the Fs passed in, returning a root directory channel and an
|
||||
|
||||
@@ -290,7 +290,7 @@ func list(ctx context.Context) error {
|
||||
if !ok {
|
||||
return errors.New("bad JSON")
|
||||
}
|
||||
fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.Replace(info["Path"].(string), "/", "-", -1))
|
||||
fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.ReplaceAll(info["Path"].(string), "/", "-"))
|
||||
fmt.Printf("%s\n\n", info["Help"])
|
||||
if authRequired := info["AuthRequired"]; authRequired != nil {
|
||||
if authRequired.(bool) {
|
||||
|
||||
@@ -304,8 +304,8 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
|
||||
}
|
||||
|
||||
func testMountAPI(t *testing.T, sockAddr string) {
|
||||
// Disable tests under macOS and the CI since they are locking up
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Disable tests under macOS and linux in the CI since they are locking up
|
||||
if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
|
||||
testy.SkipUnreliable(t)
|
||||
}
|
||||
if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
|
||||
|
||||
@@ -274,7 +274,6 @@ func (vol *Volume) mount(id string) error {
|
||||
if _, err := vol.mnt.Mount(); err != nil {
|
||||
return err
|
||||
}
|
||||
vol.mnt.MountedOn = time.Now()
|
||||
vol.mountReqs[id] = nil
|
||||
vol.drv.monChan <- false // ask monitor to refresh channels
|
||||
return nil
|
||||
|
||||
@@ -16,6 +16,7 @@ TestFichier:
|
||||
TestFTP:
|
||||
TestGoogleCloudStorage:
|
||||
TestHubic:
|
||||
TestNetStorage:
|
||||
TestOneDrive:
|
||||
TestPcloud:
|
||||
TestQingStor:
|
||||
|
||||
@@ -7,9 +7,7 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -113,14 +111,7 @@ func TestResticHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure the tempdir is properly removed
|
||||
defer func() {
|
||||
err := os.RemoveAll(tempdir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
tempdir := t.TempDir()
|
||||
|
||||
// globally set append-only mode
|
||||
prev := appendOnly
|
||||
|
||||
@@ -7,9 +7,7 @@ import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -35,14 +33,7 @@ func TestResticPrivateRepositories(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure the tempdir is properly removed
|
||||
defer func() {
|
||||
err := os.RemoveAll(tempdir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
tempdir := t.TempDir()
|
||||
|
||||
// globally set private-repos mode & test user
|
||||
prev := privateRepos
|
||||
|
||||
@@ -43,7 +43,7 @@ var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`)
|
||||
|
||||
// Unescape a string that was escaped by rclone
|
||||
func shellUnEscape(str string) string {
|
||||
str = strings.Replace(str, "'\n'", "\n", -1)
|
||||
str = strings.ReplaceAll(str, "'\n'", "\n")
|
||||
str = shellUnEscapeRegex.ReplaceAllString(str, `$1`)
|
||||
return str
|
||||
}
|
||||
|
||||
@@ -24,26 +24,51 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "size remote:path",
|
||||
Short: `Prints the total size and number of objects in remote:path.`,
|
||||
Long: `
|
||||
Counts objects in the path and calculates the total size. Prints the
|
||||
result to standard output.
|
||||
|
||||
By default the output shows values both in human-readable format and
as raw numbers (the global option ` + "`--human-readable`" + ` is not
considered). Use option ` + "`--json`" + ` to format output as JSON instead.
|
||||
|
||||
Recurses by default, use ` + "`--max-depth 1`" + ` to stop the
|
||||
recursion.
|
||||
|
||||
Some backends do not always provide file sizes, see for example
|
||||
[Google Photos](/googlephotos/#size) and
|
||||
[Google Drive](/drive/#limitations-of-google-docs).
|
||||
Rclone will then show a notice in the log indicating how many such
|
||||
files were encountered, and count them in as empty files in the output
|
||||
of the size command.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
var err error
|
||||
var results struct {
|
||||
Count int64 `json:"count"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Count int64 `json:"count"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Sizeless int64 `json:"sizeless"`
|
||||
}
|
||||
|
||||
results.Count, results.Bytes, err = operations.Count(context.Background(), fsrc)
|
||||
results.Count, results.Bytes, results.Sizeless, err = operations.Count(context.Background(), fsrc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if results.Sizeless > 0 {
|
||||
fs.Logf(fsrc, "Size may be underestimated due to %d objects with unknown size", results.Sizeless)
|
||||
}
|
||||
if jsonOutput {
|
||||
return json.NewEncoder(os.Stdout).Encode(results)
|
||||
}
|
||||
fmt.Printf("Total objects: %s (%d)\n", fs.CountSuffix(results.Count), results.Count)
|
||||
fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
|
||||
if results.Sizeless > 0 {
|
||||
fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
},
|
||||
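A standalone sketch of the JSON emitted with --json after this change; the field names come from the struct above and the numbers are invented:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	results := struct {
		Count    int64 `json:"count"`
		Bytes    int64 `json:"bytes"`
		Sizeless int64 `json:"sizeless"`
	}{Count: 215, Bytes: 104857600, Sizeless: 3}
	out, _ := json.Marshal(results)
	fmt.Println(string(out)) // {"count":215,"bytes":104857600,"sizeless":3}
}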
|
||||
@@ -8,6 +8,7 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
|
||||
TestDrive:testInfo \
|
||||
TestDropbox:testInfo \
|
||||
TestGoogleCloudStorage:rclone-testinfo \
|
||||
TestNetStorage:testInfo \
|
||||
TestOneDrive:testInfo \
|
||||
TestS3:rclone-testinfo \
|
||||
TestSftp:testInfo \
|
||||
|
||||
@@ -5,6 +5,7 @@ package makefiles
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -16,7 +17,9 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -29,37 +32,51 @@ var (
|
||||
minFileNameLength = 4
|
||||
maxFileNameLength = 12
|
||||
seed = int64(1)
|
||||
zero = false
|
||||
sparse = false
|
||||
ascii = false
|
||||
pattern = false
|
||||
chargen = false
|
||||
|
||||
// Globals
|
||||
randSource *rand.Rand
|
||||
source io.Reader
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.IntVarP(cmdFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
|
||||
flags.IntVarP(cmdFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
|
||||
flags.IntVarP(cmdFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
|
||||
flags.FVarP(cmdFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
|
||||
flags.FVarP(cmdFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
|
||||
flags.IntVarP(cmdFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
|
||||
flags.IntVarP(cmdFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")
|
||||
flags.Int64VarP(cmdFlags, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
|
||||
test.Command.AddCommand(makefilesCmd)
|
||||
makefilesFlags := makefilesCmd.Flags()
|
||||
flags.IntVarP(makefilesFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
|
||||
flags.IntVarP(makefilesFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
|
||||
flags.IntVarP(makefilesFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
|
||||
flags.FVarP(makefilesFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
|
||||
flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
|
||||
flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")

test.Command.AddCommand(makefileCmd)
makefileFlags := makefileCmd.Flags()

// Common flags to makefiles and makefile
for _, f := range []*pflag.FlagSet{makefilesFlags, makefileFlags} {
flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00")
flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)")
flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only")
flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern")
flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with a ASCII chargen pattern")
}
}

var commandDefinition = &cobra.Command{
var makefilesCmd = &cobra.Command{
Use: "makefiles <dir>",
Short: `Make a random file hierarchy in a directory`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
if seed == 0 {
seed = time.Now().UnixNano()
fs.Logf(nil, "Using random seed = %d", seed)
}
randSource = rand.New(rand.NewSource(seed))
commonInit()
outputDirectory := args[0]
directoriesToCreate = numberOfFiles / averageFilesPerDirectory
averageSize := (minFileSize + maxFileSize) / 2
@@ -73,13 +90,130 @@ var commandDefinition = &cobra.Command{
totalBytes := int64(0)
for i := 0; i < numberOfFiles; i++ {
dir := dirs[randSource.Intn(len(dirs))]
totalBytes += writeFile(dir, fileName())
size := int64(minFileSize)
if maxFileSize > minFileSize {
size += randSource.Int63n(int64(maxFileSize - minFileSize))
}
writeFile(dir, fileName(), size)
totalBytes += size
}
dt := time.Since(start)
fs.Logf(nil, "Written %viB in %v at %viB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
},
}

var makefileCmd = &cobra.Command{
Use: "makefile <size> [<file>]+ [flags]",
Short: `Make files with random contents of the size given`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
commonInit()
var size fs.SizeSuffix
err := size.Set(args[0])
if err != nil {
log.Fatalf("Failed to parse size %q: %v", args[0], err)
}
start := time.Now()
fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size)
totalBytes := int64(0)
for _, filePath := range args[1:] {
dir := filepath.Dir(filePath)
name := filepath.Base(filePath)
writeFile(dir, name, int64(size))
totalBytes += int64(size)
}
dt := time.Since(start)
fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
},
}
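The makefile command above parses its size argument with fs.SizeSuffix, which accepts human-readable suffixed sizes. A minimal standalone sketch of that parsing, outside the diff and with an illustrative size string:

```go
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs"
)

func main() {
	// fs.SizeSuffix understands suffixed sizes such as "10M" (10 MiB).
	var size fs.SizeSuffix
	if err := size.Set("10M"); err != nil {
		log.Fatalf("Failed to parse size: %v", err)
	}
	fmt.Printf("%v = %d bytes\n", size, int64(size))
}
```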
func bool2int(b bool) int {
if b {
return 1
}
return 0
}

// common initialisation for makefiles and makefile
func commonInit() {
if seed == 0 {
seed = time.Now().UnixNano()
fs.Logf(nil, "Using random seed = %d", seed)
}
randSource = rand.New(rand.NewSource(seed))
if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 {
log.Fatal("Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
}
switch {
case zero, sparse:
source = zeroReader{}
case ascii:
source = asciiReader{}
case pattern:
source = readers.NewPatternReader(math.MaxInt64)
case chargen:
source = &chargenReader{}
default:
source = randSource
}
if minFileSize > maxFileSize {
maxFileSize = minFileSize
}
}

type zeroReader struct{}

// Read a chunk of zeroes
func (zeroReader) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = 0
}
return len(p), nil
}

type asciiReader struct{}

// Read a chunk of printable ASCII characters
func (asciiReader) Read(p []byte) (n int, err error) {
n, err = randSource.Read(p)
for i := range p[:n] {
p[i] = (p[i] % (0x7F - 0x20)) + 0x20
}
return n, err
}
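As a self-contained sketch of the folding asciiReader uses (not rclone code; names here are illustrative), arbitrary random bytes can be mapped into the printable range 0x20-0x7E like this:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	src := rand.New(rand.NewSource(1))
	buf := make([]byte, 64)
	src.Read(buf)
	for i := range buf {
		// 0x7F-0x20 = 95 printable characters starting at space (0x20)
		buf[i] = (buf[i] % (0x7F - 0x20)) + 0x20
	}
	fmt.Printf("%s\n", buf)
}
```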
type chargenReader struct {
start byte // offset from startChar to start line with
written byte // chars in line so far
}

// Read a chunk of printable ASCII characters in chargen format
func (r *chargenReader) Read(p []byte) (n int, err error) {
const (
startChar = 0x20 // ' '
endChar = 0x7E // '~' inclusive
charsPerLine = 72
)
for i := range p {
if r.written >= charsPerLine {
r.start++
if r.start > endChar-startChar {
r.start = 0
}
p[i] = '\n'
r.written = 0
} else {
c := r.start + r.written + startChar
if c > endChar {
c -= endChar - startChar + 1
}
p[i] = c
r.written++
}
}
return len(p), err
}
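The reader above emits the classic rotating chargen test pattern (as in RFC 864): each 72-character line starts one character later in the printable range than the previous one. A small standalone sketch of the same rotation, for illustration only:

```go
package main

import "fmt"

func main() {
	const (
		startChar    = byte(0x20) // ' '
		endChar      = byte(0x7E) // '~'
		charsPerLine = 72
	)
	for line := byte(0); line < 3; line++ {
		buf := make([]byte, 0, charsPerLine)
		for col := byte(0); col < charsPerLine; col++ {
			c := startChar + line + col
			for c > endChar {
				c -= endChar - startChar + 1
			}
			buf = append(buf, c)
		}
		fmt.Println(string(buf))
	}
}
```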
// fileName creates a unique random file or directory name
func fileName() (name string) {
for {
@@ -134,7 +268,7 @@ func (d *dir) list(path string, output []string) []string {
}

// writeFile writes a random file at dir/name
func writeFile(dir, name string) int64 {
func writeFile(dir, name string, size int64) {
err := file.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to make directory %q: %v", dir, err)
@@ -144,8 +278,11 @@ func writeFile(dir, name string) int64 {
if err != nil {
log.Fatalf("Failed to open file %q: %v", path, err)
}
size := randSource.Int63n(int64(maxFileSize-minFileSize)) + int64(minFileSize)
_, err = io.CopyN(fd, randSource, size)
if sparse {
err = fd.Truncate(size)
} else {
_, err = io.CopyN(fd, source, size)
}
if err != nil {
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
}
@@ -154,5 +291,4 @@ func writeFile(dir, name string) int64 {
log.Fatalf("Failed to close file %q: %v", path, err)
}
fs.Infof(path, "Written file size %v", fs.SizeSuffix(size))
return size
}
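The change to writeFile above either truncates the file out to the target size (--sparse) or copies real bytes from the chosen source. A minimal sketch of that difference using only the standard library (file names are illustrative):

```go
package main

import (
	"crypto/rand"
	"io"
	"log"
	"os"
)

func main() {
	const size = 1 << 20 // 1 MiB

	// Sparse: just extend the file; on most filesystems no data blocks
	// are allocated, but the reported size is the full 1 MiB.
	sparse, err := os.Create("sparse.bin")
	if err != nil {
		log.Fatal(err)
	}
	if err := sparse.Truncate(size); err != nil {
		log.Fatal(err)
	}
	sparse.Close()

	// Dense: actually write every byte from a data source.
	dense, err := os.Create("dense.bin")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.CopyN(dense, rand.Reader, size); err != nil {
		log.Fatal(err)
	}
	dense.Close()
}
```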
@@ -25,7 +25,7 @@ var commandDefinition = &cobra.Command{
cmd.Run(false, false, command, func() error {
ctx := context.Background()
ci := fs.GetConfig(context.Background())
objects, _, err := operations.Count(ctx, fsrc)
objects, _, _, err := operations.Count(ctx, fsrc)
if err != nil {
return err
}

@@ -5,11 +5,13 @@ import (
"context"
"errors"
"fmt"
"log"
"time"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -63,13 +65,32 @@ then add the ` + "`--localtime`" + ` flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f, fileName := cmd.NewFsFile(args[0])
f, remote := newFsDst(args)
cmd.Run(true, false, command, func() error {
return Touch(context.Background(), f, fileName)
return Touch(context.Background(), f, remote)
})
},
}

// newFsDst creates a new dst fs from the arguments.
//
// The returned fs will never point to a file. It will point to the
// parent directory of specified path, and is returned together with
// the basename of file or directory, except if argument is only a
// remote name. Similar to cmd.NewFsDstFile, but without raising fatal
// when name of file or directory is empty (e.g. "remote:" or "remote:path/").
func newFsDst(args []string) (f fs.Fs, remote string) {
root, remote, err := fspath.Split(args[0])
if err != nil {
log.Fatalf("Parsing %q failed: %v", args[0], err)
}
if root == "" {
root = "."
}
f = cmd.NewFsDir([]string{root})
return f, remote
}
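A small sketch of the split newFsDst relies on, assuming fspath.Split behaves as the doc comment above describes (parent plus leaf, with an empty leaf for a bare remote); the sample paths are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	for _, arg := range []string{"remote:path/to/file", "remote:path/", "remote:"} {
		parent, leaf, err := fspath.Split(arg)
		if err != nil {
			log.Fatalf("Parsing %q failed: %v", arg, err)
		}
		fmt.Printf("%q -> parent=%q leaf=%q\n", arg, parent, leaf)
	}
}
```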
// parseTimeArgument parses a timestamp string according to specific layouts
func parseTimeArgument(timeString string) (time.Time, error) {
layout := defaultLayout
@@ -107,47 +128,51 @@ func createEmptyObject(ctx context.Context, remote string, modTime time.Time, f
}

// Touch create new file or change file modification time.
func Touch(ctx context.Context, f fs.Fs, fileName string) error {
func Touch(ctx context.Context, f fs.Fs, remote string) error {
t, err := timeOfTouch()
if err != nil {
return err
}
fs.Debugf(nil, "Touch time %v", t)
file, err := f.NewObject(ctx, fileName)
file, err := f.NewObject(ctx, remote)
if err != nil {
if errors.Is(err, fs.ErrorObjectNotFound) {
// Touch single non-existent file
// Touching non-existant path, possibly creating it as new file
if remote == "" {
fs.Logf(f, "Not touching empty directory")
return nil
}
if notCreateNewFile {
fs.Logf(f, "Not touching non-existent file due to --no-create")
return nil
}
if recursive {
// For consistency, --recursive never creates new files.
fs.Logf(f, "Not touching non-existent file due to --recursive")
return nil
}
if operations.SkipDestructive(ctx, f, "touch (create)") {
return nil
}
fs.Debugf(f, "Touching (creating)")
if err = createEmptyObject(ctx, fileName, t, f); err != nil {
fs.Debugf(f, "Touching (creating) %q", remote)
if err = createEmptyObject(ctx, remote, t, f); err != nil {
return fmt.Errorf("failed to touch (create): %w", err)
}
}
if errors.Is(err, fs.ErrorIsDir) {
// Touching existing directory
if recursive {
// Touch existing directory, recursive
fs.Debugf(nil, "Touching files in directory recursively")
return operations.TouchDir(ctx, f, t, true)
fs.Debugf(f, "Touching recursively files in directory %q", remote)
return operations.TouchDir(ctx, f, remote, t, true)
}
// Touch existing directory without recursing
fs.Debugf(nil, "Touching files in directory non-recursively")
return operations.TouchDir(ctx, f, t, false)
fs.Debugf(f, "Touching non-recursively files in directory %q", remote)
return operations.TouchDir(ctx, f, remote, t, false)
}
return err
}
// Touch single existing file
if !operations.SkipDestructive(ctx, fileName, "touch") {
fs.Debugf(f, "Touching %q", fileName)
if !operations.SkipDestructive(ctx, remote, "touch") {
fs.Debugf(f, "Touching %q", remote)
err = file.SetModTime(ctx, t)
if err != nil {
return fmt.Errorf("failed to touch: %w", err)

@@ -113,6 +113,15 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) {
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported)
}

func TestTouchEmptyName(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := Touch(context.Background(), r.Fremote, "")
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.ModTimeNotSupported)
}

func TestTouchEmptyDir(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

@@ -102,8 +102,7 @@ var envInitial []string
// sets testConfig to testFolder/rclone.config.
func createTestEnvironment(t *testing.T) {
//Set temporary folder for config and test data
tempFolder, err := ioutil.TempDir("", "rclone_cmdtest_")
require.NoError(t, err)
tempFolder := t.TempDir()
testFolder = filepath.ToSlash(tempFolder)

// Set path to temporary config file
@@ -105,15 +105,20 @@ WebDAV or S3, that work out of the box.)
|
||||
|
||||
{{< provider_list >}}
|
||||
{{< provider name="1Fichier" home="https://1fichier.com/" config="/fichier/" start="true">}}
|
||||
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
|
||||
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
{{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" note="#status">}}
|
||||
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
|
||||
{{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
|
||||
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
|
||||
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
|
||||
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
|
||||
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud-object-storage-aos" >}}
|
||||
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
|
||||
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
|
||||
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}}
|
||||
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
|
||||
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
|
||||
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
|
||||
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
|
||||
@@ -124,6 +129,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
|
||||
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
|
||||
{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
|
||||
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
|
||||
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
|
||||
@@ -148,12 +154,13 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
|
||||
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
|
||||
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
|
||||
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
|
||||
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
|
||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
|
||||
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
|
||||
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
|
||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
||||
@@ -163,7 +170,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="The local filesystem" home="/local/" config="/local/" end="true">}}
|
||||
{{< /provider_list >}}
|
||||
|
||||
Links
|
||||
## Links
|
||||
|
||||
* {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)
|
||||
* {{< icon "fab fa-github" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
|
||||
|
||||
@@ -99,9 +99,11 @@ Remote or path to alias.
|
||||
|
||||
Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path".
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: remote
|
||||
- Env Var: RCLONE_ALIAS_REMOTE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: true
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
|
||||
@@ -168,10 +168,12 @@ OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ACD_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --acd-client-secret
|
||||
|
||||
@@ -179,10 +181,12 @@ OAuth Client Secret.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ACD_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
### Advanced options
|
||||
|
||||
@@ -192,10 +196,12 @@ Here are the advanced options specific to amazon cloud drive (Amazon Drive).
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ACD_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --acd-auth-url
|
||||
|
||||
@@ -203,10 +209,12 @@ Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ACD_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --acd-token-url
|
||||
|
||||
@@ -214,19 +222,23 @@ Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ACD_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --acd-checkpoint
|
||||
|
||||
Checkpoint for internal polling (debug).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: checkpoint
|
||||
- Env Var: RCLONE_ACD_CHECKPOINT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --acd-upload-wait-per-gb
|
||||
|
||||
@@ -252,6 +264,8 @@ of big files for a range of file sizes.
|
||||
Upload with the "-v" flag to see more info about what rclone is doing
|
||||
in this situation.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_wait_per_gb
|
||||
- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
|
||||
- Type: Duration
|
||||
@@ -270,6 +284,8 @@ To download files above this threshold, rclone requests a "tempLink"
|
||||
which downloads the file through a temporary URL directly from the
|
||||
underlying S3 storage.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: templink_threshold
|
||||
- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
|
||||
- Type: SizeSuffix
|
||||
@@ -277,10 +293,12 @@ underlying S3 storage.
|
||||
|
||||
#### --acd-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_ACD_ENCODING
|
||||
- Type: MultiEncoder
|
||||
@@ -316,6 +334,5 @@ this capability cannot determine free space for an rclone mount or
|
||||
use policy `mfs` (most free space) as a member of an rclone union
|
||||
remote.
|
||||
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
|
||||
See [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
|
||||
|
||||
@@ -557,3 +557,51 @@ put them back in again.` >}}
|
||||
* Logeshwaran Murugesan <logeshwaran@testpress.in>
|
||||
* Lu Wang <coolwanglu@gmail.com>
|
||||
* Bumsu Hyeon <ksitht@gmail.com>
|
||||
* Shmz Ozggrn <98463324+ShmzOzggrn@users.noreply.github.com>
|
||||
* Kim <kim@jotta.no>
|
||||
* Niels van de Weem <n.van.de.weem@smile.nl>
|
||||
* Koopa <codingkoopa@gmail.com>
|
||||
* Yunhai Luo <yunhai-luo@hotmail.com>
|
||||
* Charlie Jiang <w@chariri.moe>
|
||||
* Alain Nussbaumer <alain.nussbaumer@alleluia.ch>
|
||||
* Vanessasaurus <814322+vsoch@users.noreply.github.com>
|
||||
* Isaac Levy <isaac.r.levy@gmail.com>
|
||||
* Gourav T <workflowautomation@protonmail.com>
|
||||
* Paulo Martins <paulo.pontes.m@gmail.com>
|
||||
* viveknathani <viveknathani2402@gmail.com>
|
||||
* Eng Zer Jun <engzerjun@gmail.com>
|
||||
* Abhiraj <abhiraj.official15@gmail.com>
|
||||
* Márton Elek <elek@apache.org> <elek@users.noreply.github.com>
|
||||
* Vincent Murphy <vdm@vdm.ie>
|
||||
* ctrl-q <34975747+ctrl-q@users.noreply.github.com>
|
||||
* Nil Alexandrov <nalexand@akamai.com>
|
||||
* GuoXingbin <101376330+guoxingbin@users.noreply.github.com>
|
||||
* Berkan Teber <berkan@berkanteber.com>
|
||||
* Tobias Klauser <tklauser@distanz.ch>
|
||||
* KARBOWSKI Piotr <piotr.karbowski@gmail.com>
|
||||
* GH <geeklihui@foxmail.com>
|
||||
* rafma0 <int.main@gmail.com>
|
||||
* Adrien Rey-Jarthon <jobs@adrienjarthon.com>
|
||||
* Nick Gooding <73336146+nickgooding@users.noreply.github.com>
|
||||
* Leroy van Logchem <lr.vanlogchem@gmail.com>
|
||||
* Zsolt Ero <zsolt.ero@gmail.com>
|
||||
* Lesmiscore <nao20010128@gmail.com>
|
||||
* ehsantdy <ehsan.tadayon@arvancloud.com>
|
||||
* SwazRGB <65694696+swazrgb@users.noreply.github.com>
|
||||
* Mateusz Puczyński <mati6095@gmail.com>
|
||||
* Michael C Tiernan - MIT-Research Computing Project <mtiernan@mit.edu>
|
||||
* Kaspian <34658474+KaspianDev@users.noreply.github.com>
|
||||
* Werner <EvilOlaf@users.noreply.github.com>
|
||||
* Hugal31 <hugo.laloge@gmail.com>
|
||||
* Christian Galo <36752715+cgalo5758@users.noreply.github.com>
|
||||
* Erik van Velzen <erik@evanv.nl>
|
||||
* Derek Battams <derek@battams.ca>
|
||||
* SimonLiu <simonliu009@users.noreply.github.com>
|
||||
* Hugo Laloge <hla@lescompanions.com>
|
||||
* Mr-Kanister <68117355+Mr-Kanister@users.noreply.github.com>
|
||||
* Rob Pickerill <r.pickerill@gmail.com>
|
||||
* Andrey <to.merge@gmail.com>
|
||||
* Eric Wolf <19wolf@gmail.com>
|
||||
* Nick <nick.naumann@mailbox.tu-dresden.de>
|
||||
* Jason Zheng <jszheng17@gmail.com>
|
||||
* Matthew Vernon <mvernon@wikimedia.org>
|
||||
|
||||
@@ -166,10 +166,12 @@ Storage Account Name.
|
||||
|
||||
Leave blank to use SAS URL or Emulator.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: account
|
||||
- Env Var: RCLONE_AZUREBLOB_ACCOUNT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-service-principal-file
|
||||
|
||||
@@ -185,10 +187,12 @@ Leave blank normally. Needed only if you want to use a service principal instead
|
||||
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: service_principal_file
|
||||
- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-key
|
||||
|
||||
@@ -196,10 +200,12 @@ Storage Account Key.
|
||||
|
||||
Leave blank to use SAS URL or Emulator.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: key
|
||||
- Env Var: RCLONE_AZUREBLOB_KEY
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-sas-url
|
||||
|
||||
@@ -207,10 +213,12 @@ SAS URL for container level access only.
|
||||
|
||||
Leave blank if using account/key or Emulator.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: sas_url
|
||||
- Env Var: RCLONE_AZUREBLOB_SAS_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-use-msi
|
||||
|
||||
@@ -225,6 +233,8 @@ the user-assigned identity will be used by default. If the resource has multiple
|
||||
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
|
||||
msi_client_id, or msi_mi_res_id parameters.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: use_msi
|
||||
- Env Var: RCLONE_AZUREBLOB_USE_MSI
|
||||
- Type: bool
|
||||
@@ -236,6 +246,8 @@ Uses local storage emulator if provided as 'true'.
|
||||
|
||||
Leave blank if using real azure storage endpoint.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: use_emulator
|
||||
- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR
|
||||
- Type: bool
|
||||
@@ -251,10 +263,12 @@ Object ID of the user-assigned MSI to use, if any.
|
||||
|
||||
Leave blank if msi_client_id or msi_mi_res_id specified.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: msi_object_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_OBJECT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-msi-client-id
|
||||
|
||||
@@ -262,10 +276,12 @@ Object ID of the user-assigned MSI to use, if any.
|
||||
|
||||
Leave blank if msi_object_id or msi_mi_res_id specified.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: msi_client_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-msi-mi-res-id
|
||||
|
||||
@@ -273,10 +289,12 @@ Azure resource ID of the user-assigned MSI to use, if any.
|
||||
|
||||
Leave blank if msi_client_id or msi_object_id specified.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: msi_mi_res_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_MI_RES_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-endpoint
|
||||
|
||||
@@ -284,32 +302,65 @@ Endpoint for the service.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_AZUREBLOB_ENDPOINT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-chunk-size
|
||||
|
||||
Upload chunk size (<= 100 MiB).
|
||||
Upload chunk size.
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" chunks stored at once in memory.
|
||||
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
|
||||
in memory.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_AZUREBLOB_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
- Default: 4Mi
|
||||
|
||||
#### --azureblob-upload-concurrency

Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.

In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.
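For example, with rclone's default of 4 transfers, the default upload
concurrency of 16 and the default 4 MiB chunk size, that is up to
4 * 16 * 4 MiB = 256 MiB of chunk buffers held in memory at once.
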
Properties:
|
||||
|
||||
- Config: upload_concurrency
|
||||
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 16
|
||||
|
||||
#### --azureblob-list-chunk
|
||||
|
||||
Size of blob list.
|
||||
@@ -322,6 +373,8 @@ minutes per megabyte on average, it will time out (
|
||||
). This can be used to limit the number of blobs items to return, to
|
||||
avoid the time out.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: list_chunk
|
||||
- Env Var: RCLONE_AZUREBLOB_LIST_CHUNK
|
||||
- Type: int
|
||||
@@ -342,10 +395,12 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
|
||||
operations from remote will not be allowed. User should first restore by
|
||||
tiering blob to "Hot" or "Cool".
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: access_tier
|
||||
- Env Var: RCLONE_AZUREBLOB_ACCESS_TIER
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --azureblob-archive-tier-delete
|
||||
|
||||
@@ -364,6 +419,8 @@ replacement. This has the potential for data loss if the upload fails
|
||||
archive tier blobs early may be chargeable.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: archive_tier_delete
|
||||
- Env Var: RCLONE_AZUREBLOB_ARCHIVE_TIER_DELETE
|
||||
- Type: bool
|
||||
@@ -378,6 +435,8 @@ uploading it so it can add it to metadata on the object. This is great
|
||||
for data integrity checking but can cause long delays for large files
|
||||
to start uploading.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: disable_checksum
|
||||
- Env Var: RCLONE_AZUREBLOB_DISABLE_CHECKSUM
|
||||
- Type: bool
|
||||
@@ -390,6 +449,8 @@ How often internal memory buffer pools will be flushed.
|
||||
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: memory_pool_flush_time
|
||||
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_FLUSH_TIME
|
||||
- Type: Duration
|
||||
@@ -399,6 +460,8 @@ This option controls how often unused buffers will be removed from the pool.
|
||||
|
||||
Whether to use mmap buffers in internal memory pool.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: memory_pool_use_mmap
|
||||
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_USE_MMAP
|
||||
- Type: bool
|
||||
@@ -406,10 +469,12 @@ Whether to use mmap buffers in internal memory pool.
|
||||
|
||||
#### --azureblob-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_AZUREBLOB_ENCODING
|
||||
- Type: MultiEncoder
|
||||
@@ -419,10 +484,12 @@ See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Public access level of a container: blob or container.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: public_access
|
||||
- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- The container and its blobs can be accessed only with an authorized request.
|
||||
@@ -436,6 +503,8 @@ Public access level of a container: blob or container.
|
||||
|
||||
If set, do not do HEAD before GET when getting objects.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: no_head_object
|
||||
- Env Var: RCLONE_AZUREBLOB_NO_HEAD_OBJECT
|
||||
- Type: bool
|
||||
@@ -453,8 +522,7 @@ this capability cannot determine free space for an rclone mount or
|
||||
use policy `mfs` (most free space) as a member of an rclone union
|
||||
remote.
|
||||
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
|
||||
See [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
|
||||
## Azure Storage Emulator Support
|
||||
|
||||
|
||||
@@ -173,6 +173,11 @@ the file instead of hiding it.
|
||||
Old versions of files, where available, are visible using the
|
||||
`--b2-versions` flag.
|
||||
|
||||
It is also possible to view a bucket as it was at a certain point in time,
|
||||
using the `--b2-version-at` flag. This will show the file versions as they
|
||||
were at that time, showing files that have been deleted afterwards, and
|
||||
hiding files that were created since.
|
||||
|
||||
If you wish to remove all the old versions then you can use the
|
||||
`rclone cleanup remote:bucket` command which will delete all the old
|
||||
versions of files, leaving the current ones intact. You can also
|
||||
@@ -329,24 +334,30 @@ Here are the standard options specific to b2 (Backblaze B2).
|
||||
|
||||
Account ID or Application Key ID.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: account
|
||||
- Env Var: RCLONE_B2_ACCOUNT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: true
|
||||
|
||||
#### --b2-key
|
||||
|
||||
Application Key.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: key
|
||||
- Env Var: RCLONE_B2_KEY
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: true
|
||||
|
||||
#### --b2-hard-delete
|
||||
|
||||
Permanently delete files on remote removal, otherwise hide files.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: hard_delete
|
||||
- Env Var: RCLONE_B2_HARD_DELETE
|
||||
- Type: bool
|
||||
@@ -362,10 +373,12 @@ Endpoint for the service.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_B2_ENDPOINT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --b2-test-mode
|
||||
|
||||
@@ -381,10 +394,12 @@ below will cause b2 to return specific errors:
|
||||
These will be set in the "X-Bz-Test-Mode" header which is documented
|
||||
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: test_mode
|
||||
- Env Var: RCLONE_B2_TEST_MODE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --b2-versions
|
||||
|
||||
@@ -393,6 +408,8 @@ Include old versions in directory listings.
|
||||
Note that when using this no file write operations are permitted,
|
||||
so you can't upload files or delete them.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: versions
|
||||
- Env Var: RCLONE_B2_VERSIONS
|
||||
- Type: bool
|
||||
@@ -406,6 +423,8 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
||||
|
||||
This value should be set no larger than 4.657 GiB (== 5 GB).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_B2_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
@@ -420,6 +439,8 @@ copied in chunks of this size.
|
||||
|
||||
The minimum is 0 and the maximum is 4.6 GiB.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: copy_cutoff
|
||||
- Env Var: RCLONE_B2_COPY_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
@@ -436,6 +457,8 @@ might a maximum of "--transfers" chunks in progress at once.
|
||||
|
||||
5,000,000 Bytes is the minimum size.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_B2_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
@@ -450,6 +473,8 @@ uploading it so it can add it to metadata on the object. This is great
|
||||
for data integrity checking but can cause long delays for large files
|
||||
to start uploading.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: disable_checksum
|
||||
- Env Var: RCLONE_B2_DISABLE_CHECKSUM
|
||||
- Type: bool
|
||||
@@ -466,10 +491,20 @@ If the custom endpoint rewrites the requests for authentication,
|
||||
e.g., in Cloudflare Workers, this header needs to be handled properly.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.
|
||||
|
||||
The URL provided here SHOULD have the protocol and SHOULD NOT have
|
||||
a trailing slash or specify the /file/bucket subpath as rclone will
|
||||
request files with "{download_url}/file/{bucket_name}/{path}".
|
||||
|
||||
Example:
|
||||
> https://mysubdomain.mydomain.tld
|
||||
(No trailing "/", "file" or "bucket")
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: download_url
|
||||
- Env Var: RCLONE_B2_DOWNLOAD_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --b2-download-auth-duration
|
||||
|
||||
@@ -478,6 +513,8 @@ Time before the authorization token will expire in s or suffix ms|s|m|h|d.
|
||||
The duration before the download authorization token will expire.
|
||||
The minimum value is 1 second. The maximum value is one week.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: download_auth_duration
|
||||
- Env Var: RCLONE_B2_DOWNLOAD_AUTH_DURATION
|
||||
- Type: Duration
|
||||
@@ -489,6 +526,8 @@ How often internal memory buffer pools will be flushed.
|
||||
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: memory_pool_flush_time
|
||||
- Env Var: RCLONE_B2_MEMORY_POOL_FLUSH_TIME
|
||||
- Type: Duration
|
||||
@@ -498,6 +537,8 @@ This option controls how often unused buffers will be removed from the pool.
|
||||
|
||||
Whether to use mmap buffers in internal memory pool.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: memory_pool_use_mmap
|
||||
- Env Var: RCLONE_B2_MEMORY_POOL_USE_MMAP
|
||||
- Type: bool
|
||||
@@ -505,10 +546,12 @@ Whether to use mmap buffers in internal memory pool.
|
||||
|
||||
#### --b2-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_B2_ENCODING
|
||||
- Type: MultiEncoder
|
||||
@@ -523,7 +566,6 @@ this capability cannot determine free space for an rclone mount or
|
||||
use policy `mfs` (most free space) as a member of an rclone union
|
||||
remote.
|
||||
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
|
||||
See [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ Optional Flags:
|
||||
|
||||
Arbitrary rclone flags may be specified on the
|
||||
[bisync command line](/commands/rclone_bisync/), for example
|
||||
`rclone bsync ./testdir/path1/ gdrive:testdir/path2/ --drive-skip-gdocs -v -v --timeout 10s`
|
||||
`rclone bisync ./testdir/path1/ gdrive:testdir/path2/ --drive-skip-gdocs -v -v --timeout 10s`
|
||||
Note that interactions of various rclone flags with bisync process flow
|
||||
have not been fully tested yet.
|
||||
|
||||
@@ -363,6 +363,7 @@ Bisync is considered _BETA_ and has been tested with the following backends:
|
||||
- OneDrive
|
||||
- S3
|
||||
- SFTP
|
||||
- Yandex Disk
|
||||
|
||||
It has not been fully tested with other services yet.
|
||||
If it works, or sorta works, please let us know and we'll update the list.
|
||||
@@ -700,7 +701,7 @@ consider using the flag
|
||||
|
||||
Google docs exist as virtual files on Google Drive and cannot be transferred
|
||||
to other filesystems natively. While it is possible to export a Google doc to
|
||||
a normal file (with `.xlsx` extension, for example), it's not possible
|
||||
a normal file (with `.xlsx` extension, for example), it is not possible
|
||||
to import a normal file back into a Google document.
|
||||
|
||||
Bisync's handling of Google Doc files is to flag them in the run log output
|
||||
|
||||
@@ -275,10 +275,12 @@ OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_BOX_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-client-secret
|
||||
|
||||
@@ -286,10 +288,12 @@ OAuth Client Secret.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_BOX_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-box-config-file
|
||||
|
||||
@@ -299,10 +303,12 @@ Leave blank normally.
|
||||
|
||||
Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: box_config_file
|
||||
- Env Var: RCLONE_BOX_BOX_CONFIG_FILE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-access-token
|
||||
|
||||
@@ -310,15 +316,19 @@ Box App Primary Access Token
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: access_token
|
||||
- Env Var: RCLONE_BOX_ACCESS_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-box-sub-type
|
||||
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: box_sub_type
|
||||
- Env Var: RCLONE_BOX_BOX_SUB_TYPE
|
||||
- Type: string
|
||||
@@ -337,10 +347,12 @@ Here are the advanced options specific to box (Box).
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_BOX_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-auth-url
|
||||
|
||||
@@ -348,10 +360,12 @@ Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_BOX_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-token-url
|
||||
|
||||
@@ -359,15 +373,19 @@ Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_BOX_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-root-folder-id
|
||||
|
||||
Fill in for rclone to use a non root folder as its starting point.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: root_folder_id
|
||||
- Env Var: RCLONE_BOX_ROOT_FOLDER_ID
|
||||
- Type: string
|
||||
@@ -377,6 +395,8 @@ Fill in for rclone to use a non root folder as its starting point.
|
||||
|
||||
Cutoff for switching to multipart upload (>= 50 MiB).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_BOX_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
@@ -386,6 +406,8 @@ Cutoff for switching to multipart upload (>= 50 MiB).
|
||||
|
||||
Max number of times to try committing a multipart file.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: commit_retries
|
||||
- Env Var: RCLONE_BOX_COMMIT_RETRIES
|
||||
- Type: int
|
||||
@@ -395,6 +417,8 @@ Max number of times to try committing a multipart file.
|
||||
|
||||
Size of listing chunk 1-1000.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: list_chunk
|
||||
- Env Var: RCLONE_BOX_LIST_CHUNK
|
||||
- Type: int
|
||||
@@ -404,17 +428,21 @@ Size of listing chunk 1-1000.
|
||||
|
||||
Only show items owned by the login (email address) passed in.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: owned_by
|
||||
- Env Var: RCLONE_BOX_OWNED_BY
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --box-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_BOX_ENCODING
|
||||
- Type: MultiEncoder
|
||||
@@ -438,6 +466,5 @@ this capability cannot determine free space for an rclone mount or
|
||||
use policy `mfs` (most free space) as a member of an rclone union
|
||||
remote.
|
||||
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
|
||||
See [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)