Mirror of https://github.com/rclone/rclone.git (synced 2026-01-23 04:43:21 +00:00)

Compare commits: 275 commits, fix-5835-c...fix-webdav
| SHA1 |
|---|
| dfc5b0460b |
| 26db80c270 |
| 9eb3470c9c |
| a449dd7d1c |
| fc4fe33703 |
| e11bfacfcf |
| a9c49c50a0 |
| 8979337313 |
| 7ffab5d998 |
| 3ccf222acb |
| 2781f8e2f1 |
| 3d55f69338 |
| cc9bc2cb80 |
| 80ac59ee5b |
| 5d6a6dd6c0 |
| c676e2139d |
| 7361c98b2d |
| 5cc47de912 |
| 6d342a3c5b |
| 336051870e |
| 38c6d022bd |
| c138367df6 |
| da404dc0f2 |
| 28e43fe7af |
| 3ec25f437b |
| a34276e9b3 |
| c2baacc0a4 |
| fcec4bedbe |
| 813a5e0931 |
| bd4abb15a3 |
| 7f84283539 |
| 47b1a0d6fa |
| ce168ecac2 |
| 4f0ddb60e7 |
| b929a56f46 |
| 74af6409d4 |
| 0e77072dcc |
| 2437eb3cce |
| a12c94caff |
| 542c1616b8 |
| 8697f0bd26 |
| a9f18f8093 |
| 8e5e230b81 |
| c0985e93b7 |
| fb4f7555c7 |
| f2e7a2e794 |
| 9e4854955c |
| 319ac225e4 |
| a9d3283d97 |
| edf0412464 |
| e6194a4b83 |
| 7f05990623 |
| e16f2a566f |
| a36fef8a66 |
| 6500e1d205 |
| 9f7484e4e9 |
| 0ba702ccf4 |
| 6f91198b57 |
| cf0a72aecd |
| f6fd6ee777 |
| 1e66d052fd |
| e5974ac4b0 |
| 50a0c3482d |
| 389a29b017 |
| 9dcf9375e8 |
| 1d6d41fb91 |
| a3d4307892 |
| a446106041 |
| 607172b6ec |
| 94757277bc |
| deab86867c |
| c0c5b3bc6b |
| a947f298e6 |
| 1b0128ecb2 |
| c5395db1f1 |
| 6e5382fc99 |
| 134592adaa |
| 36e614f550 |
| 7bfed98b48 |
| f471096fd0 |
| 4cebade95d |
| a8cd18faf3 |
| e34c543660 |
| 598364ad0f |
| 211dbe9aee |
| 4829527dac |
| cc8dde402f |
| 2b67ad17aa |
| 6da3522499 |
| 97606bbdef |
| a15885dd74 |
| 87c201c92a |
| d77736c21a |
| 86bd5f6922 |
| fe271a4e35 |
| 75455d4000 |
| 82e24f521f |
| 5605e34f7b |
| 06598531e0 |
| b1d43f8d41 |
| b53c38c9fd |
| 03715f6c6b |
| 07481396e0 |
| bab91e4402 |
| fde40319ef |
| 94e330d4fa |
| 087543d723 |
| 6a759d936a |
| 7c31240bb8 |
| 25146b4306 |
| 240561850b |
| 39a1e37441 |
| 4c02f50ef5 |
| f583b86334 |
| 118e8e1470 |
| afcea9c72b |
| 27176cc6bb |
| f1e4b7da7b |
| f065a267f6 |
| 17f8014909 |
| 8ba04562c3 |
| 285747b1d1 |
| 7bb8b8f4ba |
| 59c242bbf6 |
| a2bacd7d3f |
| 9babcc4811 |
| a0f665ec3c |
| ecdf42c17f |
| be9ee1d138 |
| 9e9ead2ac4 |
| 4f78226f8b |
| 54c9c3156c |
| 6ecbbf796e |
| 603e51c43f |
| ca4671126e |
| 6ea26b508a |
| 887cccb2c1 |
| d975196cfa |
| 1f39b28f49 |
| 2738db22fb |
| 1978ddde73 |
| c2bfda22ab |
| d4da9b98d6 |
| e4f5912294 |
| 750fffdf71 |
| 388e74af52 |
| f9354fff2f |
| ff1f173fc2 |
| f8073a7b63 |
| 807f1cedaa |
| bf9c68c88a |
| 189cba0fbe |
| 69f726f16c |
| 65652f7a75 |
| 47f9ab2f56 |
| 5dd51e6149 |
| 6a6d254a9f |
| fd453f2c7b |
| 5d06a82c5d |
| 847868b4ba |
| 38ca178cf3 |
| 9427d22f99 |
| 7b1428a498 |
| ec72432cec |
| 2339172df2 |
| 268b808bf8 |
| 74898bac3b |
| e0fbca02d4 |
| 21355b4208 |
| 251b84ff2c |
| 537b62917f |
| 71a784cfa2 |
| 8ee0fe9863 |
| 8f164e4df5 |
| 06ecc6511b |
| 3529bdec9b |
| 486b43f8c7 |
| 89f0e4df80 |
| 399fb5b7fb |
| 19f1ed949c |
| d3a1001094 |
| dc7e3ea1e3 |
| f22b703a51 |
| c40129d610 |
| 8dc93f1792 |
| f4c40bf79d |
| 9cc50a614b |
| bcb07a67f6 |
| 25ea04f1db |
| 06ffd4882d |
| 19a5e1d63b |
| ec88b66dad |
| aa2d7f00c2 |
| 3e125443aa |
| 3c271b8b1e |
| 6d92ba2c6c |
| c26dc69e1b |
| b0de0b4609 |
| f54641511a |
| 8cf76f5e11 |
| 18c24014da |
| 0ae39bda8d |
| 051685baa1 |
| 07f53aebdc |
| bd6d36b3f6 |
| b168479429 |
| b447b0cd78 |
| 4bd2386632 |
| 83b6b62c1b |
| 5826cc9d9e |
| 252432ae54 |
| 8821629333 |
| a2092a8faf |
| 2b6f4241b4 |
| e3dd16d490 |
| 9e1fd923f6 |
| 3684789858 |
| 1ac1dd428a |
| 65dbd29c22 |
| 164774d7e1 |
| 507020f408 |
| a667e03fc9 |
| 1045344943 |
| 5e469db420 |
| 946e84d194 |
| 162aba60eb |
| d8a874c32b |
| 9c451d9ac6 |
| 8f3f24672c |
| 0eb7b716d9 |
| ee9684e60f |
| e0cbe413e1 |
| 2523dd6220 |
| c504d97017 |
| b783f09fc6 |
| a301478a13 |
| 63b450a2a5 |
| 843b77aaaa |
| 3641727edb |
| 38e2f835ed |
| bd4bbed592 |
| 994b501188 |
| dfa9381814 |
| 2a85feda4b |
| ad46af9168 |
| 2fed02211c |
| 237daa8aaf |
| 8aeca6c033 |
| fd82876086 |
| be1a668e95 |
| 9d4eab32d8 |
| b4ba7b69b8 |
| deef659aef |
| 4b99e84242 |
| 06bdf7c64c |
| e1225b5729 |
| 871cc2f62d |
| bc23bf11db |
| b55575e622 |
| 328f0e7135 |
| a52814eed9 |
| 071a9e882d |
| 4e2ca3330c |
| 408d9f3e7a |
| 0681a5c86a |
| df09c3f555 |
| c41814fd2d |
| c2557cc432 |
| 3425726c50 |
| 46175a22d8 |
| bcf0e15ad7 |
| b91c349cd5 |
| d252816706 |
| 729117af68 |
| cd4d8d55ec |
.github/workflows/build.yml (vendored, 73 changes)
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true

@@ -40,8 +40,8 @@ jobs:
             deploy: true
 
           - job_name: mac_amd64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true

@@ -49,52 +49,41 @@ jobs:
             deploy: true
 
           - job_name: mac_arm64
-            os: macOS-latest
-            go: '1.17.x'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
-            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
-          - job_name: windows_amd64
+          - job_name: windows
             os: windows-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
-            build_flags: '-include "^windows/amd64" -cgo'
+            cgo: '0'
+            build_flags: '-include "^windows/"'
             build_args: '-buildmode exe'
             quicktest: true
-            racequicktest: true
-            deploy: true
-
-          - job_name: windows_386
-            os: windows-latest
-            go: '1.17.x'
-            gotags: cmount
-            goarch: '386'
-            cgo: '1'
-            build_flags: '-include "^windows/386" -cgo'
-            build_args: '-buildmode exe'
-            quicktest: true
             deploy: true
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.18.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.15
-            os: ubuntu-latest
-            go: '1.15.x'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.16
             os: ubuntu-latest
             go: '1.16.x'
             quicktest: true
             racequicktest: true
 
+          - job_name: go1.17
+            os: ubuntu-latest
+            go: '1.17.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}

@@ -110,6 +99,7 @@ jobs:
       with:
         stable: 'false'
         go-version: ${{ matrix.go }}
+        check-latest: true
 
     - name: Set environment variables
       shell: bash

@@ -134,7 +124,7 @@ jobs:
       run: |
         brew update
         brew install --cask macfuse
-      if: matrix.os == 'macOS-latest'
+      if: matrix.os == 'macos-11'
 
     - name: Install Libraries on Windows
       shell: powershell

@@ -177,6 +167,11 @@ jobs:
       run: |
         make
 
+    - name: Rclone version
+      shell: bash
+      run: |
+        rclone version
+
     - name: Run tests
       shell: bash
       run: |

@@ -245,14 +240,14 @@ jobs:
         fetch-depth: 0
 
       # Upgrade together with NDK version
-    - name: Set up Go 1.16
+    - name: Set up Go
       uses: actions/setup-go@v1
       with:
-        go-version: 1.16
+        go-version: 1.18.x
 
       # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
     - name: Force NDK version
-      run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
+      run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
 
     - name: Go module cache
       uses: actions/cache@v2

@@ -273,8 +268,8 @@ jobs:
 
     - name: install gomobile
       run: |
-        go get golang.org/x/mobile/cmd/gobind
-        go get golang.org/x/mobile/cmd/gomobile
+        go install golang.org/x/mobile/cmd/gobind@latest
+        go install golang.org/x/mobile/cmd/gomobile@latest
         env PATH=$PATH:~/go/bin gomobile init
 
     - name: arm-v7a gomobile build

@@ -283,7 +278,7 @@
     - name: arm-v7a Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=arm' >> $GITHUB_ENV

@@ -296,7 +291,7 @@
     - name: arm64-v8a Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=arm64' >> $GITHUB_ENV

@@ -309,7 +304,7 @@
     - name: x86 Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=386' >> $GITHUB_ENV

@@ -322,7 +317,7 @@
     - name: x64 Set environment variables
       shell: bash
       run: |
-        echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+        echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
         echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
         echo 'GOOS=android' >> $GITHUB_ENV
         echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -20,7 +20,7 @@ jobs:
       with:
         tag: beta
         imageName: rclone/rclone
-        platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+        platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
         publish: true
         dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
         dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

@@ -28,7 +28,7 @@ jobs:
       with:
         tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
         imageName: rclone/rclone
-        platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+        platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
         publish: true
         dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
         dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

@@ -50,7 +50,7 @@ jobs:
           PLUGIN_USER=rclone
           docker login --username ${{ secrets.DOCKER_HUB_USER }} \
             --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
             export PLUGIN_USER PLUGIN_ARCH
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | tardigrade backend |
+| Caleb Case | @calebcase | storj backend |
 
 **This is a work in progress Draft**
MANUAL.html (generated, 5267 changes): file diff suppressed because it is too large.

MANUAL.txt (generated, 7550 changes): file diff suppressed because it is too large.
Makefile (10 changes)
@@ -97,7 +97,7 @@ release_dep_linux:
 
 # Get the release dependencies we only install on Windows
 release_dep_windows:
-	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
+	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
 
 # Update dependencies
 showupdates:

@@ -245,18 +245,18 @@ retag:
 startdev:
 	@echo "Version is $(VERSION)"
 	@echo "Next version is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
 	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
 
 startstable:
 	@echo "Version is $(VERSION)"
 	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
 	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_PATCH_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
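For reference, the `fs/versiontag.go` file generated by the `echo ... | gofmt` pipeline above would come out like this (a sketch, with an illustrative version number standing in for `$(NEXT_VERSION)`):

```go
package fs

// VersionTag of rclone
var VersionTag = "v1.59.0" // illustrative value of $(NEXT_VERSION)
```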
README.md (11 changes)
@@ -1,4 +1,5 @@
-[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
+[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |

@@ -20,14 +21,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 ## Storage providers
 
 * 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
+* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
+* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
+* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
+* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)

@@ -38,6 +44,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
+* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)

@@ -65,8 +72,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
+* Storj [:page_facing_up:](https://rclone.org/storj/)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
-* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
 * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -22,12 +22,14 @@ import (
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/http"
 	_ "github.com/rclone/rclone/backend/hubic"
+	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
 	_ "github.com/rclone/rclone/backend/local"
 	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
 	_ "github.com/rclone/rclone/backend/memory"
+	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
 	_ "github.com/rclone/rclone/backend/pcloud"

@@ -39,9 +41,9 @@ import (
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
+	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/tardigrade"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
@@ -26,6 +26,7 @@ import (
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/Azure/go-autorest/autorest/adal"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"

@@ -43,8 +44,9 @@ import (
 const (
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 10 * time.Second
-	decayConstant    = 1    // bigger for slower decay, exponential
-	maxListChunkSize = 5000 // number of items to read at once
+	decayConstant    = 1     // bigger for slower decay, exponential
+	maxListChunkSize = 5000  // number of items to read at once
+	maxUploadParts   = 50000 // maximum allowed number of parts/blocks in a multi-part upload
 	modTimeKey       = "mtime"
 	timeFormatIn     = time.RFC3339
 	timeFormatOut    = "2006-01-02T15:04:05.000000000Z07:00"

@@ -371,15 +373,9 @@ func (o *Object) split() (container, containerPath string) {
 
 // validateAccessTier checks if azureblob supports user supplied tier
 func validateAccessTier(tier string) bool {
-	switch tier {
-	case string(azblob.AccessTierHot),
-		string(azblob.AccessTierCool),
-		string(azblob.AccessTierArchive):
-		// valid cases
-		return true
-	default:
-		return false
-	}
+	return strings.EqualFold(tier, string(azblob.AccessTierHot)) ||
+		strings.EqualFold(tier, string(azblob.AccessTierCool)) ||
+		strings.EqualFold(tier, string(azblob.AccessTierArchive))
 }
 
 // validatePublicAccess checks if azureblob supports use supplied public access level
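The rewrite above replaces an exact-match `switch` with `strings.EqualFold`, which compares strings case-insensitively under Unicode case folding without allocating lowercased copies, so user-supplied tiers such as "Hot" or "ARCHIVE" now validate. A minimal standard-library illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// EqualFold reports whether two strings are equal under Unicode case folding.
	fmt.Println(strings.EqualFold("Hot", "hot"))         // true
	fmt.Println(strings.EqualFold("ARCHIVE", "archive")) // true
	fmt.Println(strings.EqualFold("warm", "hot"))        // false
}
```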
@@ -612,7 +608,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.UseMSI:
 		var token adal.Token
-		var userMSI *userMSI = &userMSI{}
+		var userMSI = &userMSI{}
 		if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
 			// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
 			// Validate and ensure exactly one is set. (To do: better validation.)

@@ -1461,6 +1457,10 @@ func (o *Object) clearMetaData() {
 // o.size
 // o.md5
 func (o *Object) readMetaData() (err error) {
+	container, _ := o.split()
+	if !o.fs.containerOK(container) {
+		return fs.ErrorObjectNotFound
+	}
 	if !o.modTime.IsZero() {
 		return nil
 	}

@@ -1653,7 +1653,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			return errCantUpdateArchiveTierBlobs
 		}
 	}
-	container, _ := o.split()
+	container, containerPath := o.split()
+	if container == "" || containerPath == "" {
+		return fmt.Errorf("can't upload to root - need a container")
+	}
 	err = o.fs.makeContainer(ctx, container)
 	if err != nil {
 		return err

@@ -1682,8 +1685,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}
 
+	uploadParts := int64(maxUploadParts)
+	if uploadParts < 1 {
+		uploadParts = 1
+	} else if uploadParts > maxUploadParts {
+		uploadParts = maxUploadParts
+	}
+	// calculate size of parts/blocks
+	partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
+
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize:      int(o.fs.opt.ChunkSize),
+		BufferSize:      int(partSize),
 		MaxBuffers:      o.fs.opt.UploadConcurrency,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
@@ -61,3 +61,25 @@ func TestServicePrincipalFileFailure(t *testing.T) {
 	assert.Error(t, err)
 	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
 }
+
+func TestValidateAccessTier(t *testing.T) {
+	tests := map[string]struct {
+		accessTier string
+		want       bool
+	}{
+		"hot":     {"hot", true},
+		"HOT":     {"HOT", true},
+		"Hot":     {"Hot", true},
+		"cool":    {"cool", true},
+		"archive": {"archive", true},
+		"empty":   {"", false},
+		"unknown": {"unknown", false},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			got := validateAccessTier(test.accessTier)
+			assert.Equal(t, test.want, got)
+		})
+	}
+}
@@ -64,7 +64,8 @@ const (
 
 // Globals
 var (
-	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
+	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
+	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
 )
 
 // Register with Fs

@@ -106,6 +107,11 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 			Help:     "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:     "version_at",
+			Help:     "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
+			Default:  fs.Time{},
+			Advanced: true,
 		}, {
 			Name: "hard_delete",
 			Help: "Permanently delete files on remote removal, otherwise hide files.",

@@ -160,7 +166,15 @@ free egress for data downloaded through the Cloudflare network.
 Rclone works with private buckets by sending an "Authorization" header.
 If the custom endpoint rewrites the requests for authentication,
 e.g., in Cloudflare Workers, this header needs to be handled properly.
-Leave blank if you want to use the endpoint provided by Backblaze.`,
+Leave blank if you want to use the endpoint provided by Backblaze.
+
+The URL provided here SHOULD have the protocol and SHOULD NOT have
+a trailing slash or specify the /file/bucket subpath as rclone will
+request files with "{download_url}/file/{bucket_name}/{path}".
+
+Example:
+> https://mysubdomain.mydomain.tld
+(No trailing "/", "file" or "bucket")`,
 			Advanced: true,
 		}, {
 			Name: "download_auth_duration",

@@ -203,6 +217,7 @@ type Options struct {
 	Endpoint     string        `config:"endpoint"`
 	TestMode     string        `config:"test_mode"`
 	Versions     bool          `config:"versions"`
+	VersionAt    fs.Time       `config:"version_at"`
 	HardDelete   bool          `config:"hard_delete"`
 	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
 	CopyCutoff   fs.SizeSuffix `config:"copy_cutoff"`

@@ -688,9 +703,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		Method: "POST",
 		Path:   "/b2_list_file_names",
 	}
-	if hidden {
+	if hidden || f.opt.VersionAt.IsSet() {
 		opts.Path = "/b2_list_file_versions"
 	}
+
+	lastFileName := ""
+
 	for {
 		var response api.ListFileNamesResponse
 		err := f.pacer.Call(func() (bool, error) {

@@ -720,7 +738,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		if addBucket {
 			remote = path.Join(bucket, remote)
 		}
+
+		if f.opt.VersionAt.IsSet() {
+			if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
+				// Ignore versions that were created after the specified time
+				continue
+			}
+
+			if file.Name == lastFileName {
+				// Ignore versions before the already returned version
+				continue
+			}
+		}
+
 		// Send object
+		lastFileName = file.Name
 		err = fn(remote, file, isDirectory)
 		if err != nil {
 			if err == errEndList {
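The filtering above is what implements `--b2-version-at`: `b2_list_file_versions` returns each file's versions newest first, so the first version whose upload time is not after the cutoff is the one that was current at that time, and `lastFileName` then suppresses the older versions of the same name. A standalone sketch of that selection rule (a hypothetical helper, not rclone code):

```go
package main

import (
	"fmt"
	"time"
)

// versionAtIndex returns the index of the version that was current at
// versionAt, given the upload times of one file sorted newest first,
// or -1 if the file did not exist yet at that time.
func versionAtIndex(uploads []time.Time, versionAt time.Time) int {
	for i, t := range uploads {
		if t.After(versionAt) {
			continue // created after the requested time - ignore
		}
		return i // newest version not after the cutoff
	}
	return -1
}

func main() {
	day := func(d int) time.Time { return time.Date(2022, 1, d, 0, 0, 0, 0, time.UTC) }
	uploads := []time.Time{day(9), day(5), day(1)} // newest first
	fmt.Println(versionAtIndex(uploads, day(6)))   // 1 -> the version uploaded on the 5th
}
```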
@@ -1820,6 +1852,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
+	if o.fs.opt.VersionAt.IsSet() {
+		return errNotWithVersionAt
+	}
 	size := src.Size()
 
 	bucket, bucketPath := o.split()

@@ -1975,6 +2010,9 @@ func (o *Object) Remove(ctx context.Context) error {
 	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
+	if o.fs.opt.VersionAt.IsSet() {
+		return errNotWithVersionAt
+	}
 	if o.fs.opt.HardDelete {
 		return o.fs.deleteByID(ctx, o.id, bucketPath)
 	}
@@ -18,6 +18,7 @@ import (
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/rest"

@@ -88,21 +89,19 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
-	remote := o.remote
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
 	size := src.Size()
 	parts := int64(0)
 	sha1SliceSize := int64(maxParts)
+	chunkSize := defaultChunkSize
 	if size == -1 {
 		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 	} else {
+		chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
 		parts = size / int64(chunkSize)
 		if size%int64(chunkSize) != 0 {
 			parts++
 		}
-		if parts > maxParts {
-			return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
-		}
 		sha1SliceSize = parts
 	}
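Both this hunk and the azureblob one above delegate to the new `fs/chunksize.Calculator`, whose job is to pick a part size that keeps the part count within the provider limit (`maxParts` here, `maxUploadParts` for azureblob) instead of failing the transfer. A hedged sketch of that kind of calculation, not the actual implementation:

```go
// chunkSizeFor grows the chunk size until size fits in at most maxParts
// parts, starting from the configured default (illustrative logic only).
func chunkSizeFor(size int64, maxParts int, defaultChunkSize int64) int64 {
	chunkSize := defaultChunkSize
	// ceil(size/chunkSize) parts must not exceed maxParts
	for (size+chunkSize-1)/chunkSize > int64(maxParts) {
		chunkSize *= 2 // double until the object fits
	}
	return chunkSize
}
```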
backend/cache/cache.go (vendored, 8 changes)
@@ -394,7 +394,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		notifiedRemotes: make(map[string]bool),
 	}
 	cache.PinUntilFinalized(f.Fs, f)
-	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
+	rps := rate.Inf
+	if opt.Rps > 0 {
+		rps = rate.Limit(float64(opt.Rps))
+	}
+	f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
 
 	f.plexConnector = &plexConnector{}
 	if opt.PlexURL != "" {
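The guard above matters because a limiter built with a zero rate never refills its token bucket, so an unset `rps` setting would eventually stall every worker; `rate.Inf` is the `golang.org/x/time/rate` sentinel for "no limit". A minimal demonstration:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// rate.Inf disables throttling entirely: Allow always succeeds.
	lim := rate.NewLimiter(rate.Inf, 4)
	for i := 0; i < 8; i++ {
		fmt.Println(lim.Allow()) // always true with rate.Inf
	}
}
```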
@@ -1743,7 +1747,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("About not supported")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -515,7 +515,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 
 	strRegex := regexp.QuoteMeta(pattern)
 	strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
-	strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
+	strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
 	strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
 	f.nameRegexp = regexp.MustCompile(strRegex)

@@ -524,7 +524,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 	if numDigits > 1 {
 		fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
 	}
-	strFmt := strings.Replace(pattern, "%", "%%", -1)
+	strFmt := strings.ReplaceAll(pattern, "%", "%%")
 	strFmt = strings.Replace(strFmt, "*", "%s", 1)
 	f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
 	f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")

@@ -1895,7 +1895,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.base.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }

@@ -1904,7 +1904,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.base.Features().About
 	if do == nil {
-		return nil, errors.New("About not supported")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -401,6 +401,10 @@ func isCompressible(r io.Reader) (bool, error) {
 	if err != nil {
 		return false, err
 	}
+	err = w.Close()
+	if err != nil {
+		return false, err
+	}
 	ratio := float64(n) / float64(b.Len())
 	return ratio > minCompressionRatio, nil
 }
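The added `w.Close()` is the point of this hunk: gzip buffers internally, and until the writer is closed `b.Len()` undercounts the compressed output, skewing the ratio. A self-contained sketch of the heuristic with the fix applied (`minCompressionRatio` is an assumed constant here; the real one is defined elsewhere in the backend):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

const minCompressionRatio = 1.1 // assumption for illustration

// isCompressible compresses sample and reports whether the input is
// sufficiently larger than its gzip output to be worth compressing.
func isCompressible(sample []byte) (bool, error) {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	n, err := w.Write(sample)
	if err != nil {
		return false, err
	}
	// Close flushes gzip's internal buffers - without it b.Len() is too small.
	if err := w.Close(); err != nil {
		return false, err
	}
	return float64(n)/float64(b.Len()) > minCompressionRatio, nil
}

func main() {
	ok, _ := isCompressible(bytes.Repeat([]byte("abcd"), 1024))
	fmt.Println(ok) // true - repetitive data compresses well
}
```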
@@ -626,9 +630,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
 	// Put the data
 	mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
 	if err != nil {
-		removeErr := mo.Remove(ctx)
-		if removeErr != nil {
-			fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+		if mo != nil {
+			removeErr := mo.Remove(ctx)
+			if removeErr != nil {
+				fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+			}
 		}
 		return nil, err
 	}

@@ -900,7 +906,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp: not supported by underlying remote")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }

@@ -909,7 +915,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("can't About: not supported by underlying remote")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -443,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 			if err != nil {
 				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 			}
-			return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+			return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 		}
 		fs.Debugf(src, "%v = %s OK", ht, srcHash)
 	}

@@ -597,7 +597,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("can't CleanUp")
+		return errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }

@@ -606,7 +606,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("About not supported")
+		return nil, errors.New("not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -70,7 +70,7 @@ const (
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
 	minChunkSize     = fs.SizeSuffix(googleapi.MinUploadChunkSize)
 	defaultChunkSize = 8 * fs.Mebi
-	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
+	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
 	listRGrouping    = 50   // number of IDs to search at once when using ListR
 	listRInputBuffer = 1000 // size of input buffer when using ListR
 	defaultXDGIcon   = "text-html"

@@ -84,7 +84,7 @@ var (
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.TitleBarRedirectURL,
+		RedirectURL:  oauthutil.RedirectURL,
 	}
 	_mimeTypeToExtensionDuplicates = map[string]string{
 		"application/x-vnd.oasis.opendocument.presentation": ".odp",

@@ -299,6 +299,17 @@ a non root folder as its starting point.
 			Default:  true,
 			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
 			Advanced: true,
+		}, {
+			Name:    "copy_shortcut_content",
+			Default: false,
+			Help: `Server side copy contents of shortcuts instead of the shortcut.
+
+When doing server side copies, normally rclone will copy shortcuts as
+shortcuts.
+
+If this flag is used then rclone will copy the contents of shortcuts
+rather than shortcuts themselves when doing server side copies.`,
+			Advanced: true,
 		}, {
 			Name:    "skip_gdocs",
 			Default: false,

@@ -542,6 +553,14 @@ Google don't document so it may break in the future.
 Normally rclone dereferences shortcut files making them appear as if
 they are the original file (see [the shortcuts section](#shortcuts)).
 If this flag is set then rclone will ignore shortcut files completely.
 `,
 			Advanced: true,
 			Default:  false,
+		}, {
+			Name: "skip_dangling_shortcuts",
+			Help: `If set skip dangling shortcut files.
+
+If this is set then rclone will not show any dangling shortcuts in listings.
+`,
+			Advanced: true,
+			Default:  false,
 		}, {

@@ -578,6 +597,7 @@ type Options struct {
 	TeamDriveID         string `config:"team_drive"`
 	AuthOwnerOnly       bool   `config:"auth_owner_only"`
 	UseTrash            bool   `config:"use_trash"`
+	CopyShortcutContent bool   `config:"copy_shortcut_content"`
 	SkipGdocs           bool   `config:"skip_gdocs"`
 	SkipChecksumGphotos bool   `config:"skip_checksum_gphotos"`
 	SharedWithMe        bool   `config:"shared_with_me"`

@@ -604,6 +624,7 @@ type Options struct {
 	StopOnUploadLimit     bool                 `config:"stop_on_upload_limit"`
 	StopOnDownloadLimit   bool                 `config:"stop_on_download_limit"`
 	SkipShortcuts         bool                 `config:"skip_shortcuts"`
+	SkipDanglingShortcuts bool                 `config:"skip_dangling_shortcuts"`
 	Enc                   encoder.MultiEncoder `config:"encoding"`
 }

@@ -639,6 +660,7 @@ type baseObject struct {
 	mimeType    string   // The object MIME type
 	bytes       int64    // size of the object
 	parents     []string // IDs of the parent directories
+	resourceKey *string  // resourceKey is needed for link shared objects
 }
 type documentObject struct {
 	baseObject

@@ -808,8 +830,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	if title != "" {
 		searchTitle := f.opt.Enc.FromStandardName(title)
 		// Escaping the backslash isn't documented but seems to work
-		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
-		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
+		searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
+		searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)
 
 		var titleQuery bytes.Buffer
 		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)

@@ -906,6 +928,11 @@ OUTER:
 			if err != nil {
 				return false, fmt.Errorf("list: %w", err)
 			}
+			// leave the dangling shortcut out of the listings
+			// we've already logged about the dangling shortcut in resolveShortcut
+			if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
+				continue
+			}
 		}
 		// Check the case of items is correct since
 		// the `=` operator is case insensitive.

@@ -1293,12 +1320,16 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
 			}
 		}
 	}
-	return &Object{
+	o := &Object{
 		baseObject: f.newBaseObject(remote, info),
 		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
 		md5sum:     strings.ToLower(info.Md5Checksum),
 		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
 	}
+	if info.ResourceKey != "" {
+		o.resourceKey = &info.ResourceKey
+	}
+	return o
 }
 
 // newDocumentObject creates an fs.Object for a google docs drive.File

@@ -1571,6 +1602,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
 		}
 	}
 
+	// If using a link type export and a more specific export
+	// hasn't been found all docs should be exported
+	for _, _extension := range f.exportExtensions {
+		_mimeType := mime.TypeByExtension(_extension)
+		if isLinkMimeType(_mimeType) {
+			return _extension, _mimeType, true
+		}
+	}
+
 	// else return empty
 	return "", "", isDocument
 }

@@ -1581,6 +1621,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", "", false)
 func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
+	// If item has MD5 sum it is a file stored on drive
+	if item.Md5Checksum != "" {
+		return
+	}
+	// Folders can't be documents
+	if item.MimeType == driveFolderType {
+		return
+	}
 	extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
 	if extension != "" {
 		filename = item.Name + extension

@@ -2374,16 +2422,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		createInfo.Description = ""
 	}
 
-	// get the ID of the thing to copy - this is the shortcut if available
+	// get the ID of the thing to copy
+	// copy the contents if CopyShortcutContent
+	// else copy the shortcut only
+
 	id := shortcutID(srcObj.id)
+
+	if f.opt.CopyShortcutContent {
+		id = actualID(srcObj.id)
+	}
+
 	var info *drive.File
 	err = f.pacer.Call(func() (bool, error) {
-		info, err = f.svc.Files.Copy(id, createInfo).
+		copy := f.svc.Files.Copy(id, createInfo).
 			Fields(partialFields).
 			SupportsAllDrives(true).
-			KeepRevisionForever(f.opt.KeepRevisionForever).
-			Context(ctx).Do()
+			KeepRevisionForever(f.opt.KeepRevisionForever)
+		srcObj.addResourceKey(copy.Header())
+		info, err = copy.Context(ctx).Do()
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {

@@ -3480,6 +3536,14 @@ func (o *baseObject) Storable() bool {
 	return true
 }
 
+// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
+// object if required.
+func (o *baseObject) addResourceKey(header http.Header) {
+	if o.resourceKey != nil {
+		header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
+	}
+}
+
 // httpResponse gets an http.Response object for the object
 // using the url and method passed in
 func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
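The header `addResourceKey` emits has the form `<file id>/<resource key>`. A tiny sketch with hypothetical values, mirroring the `fmt.Sprintf` above:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	// Hypothetical id and resource key for a link-shared file.
	h.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", "abc123", "0-XyZ"))
	fmt.Println(h.Get("X-Goog-Drive-Resource-Keys")) // abc123/0-XyZ
}
```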
@@ -3495,6 +3559,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
 		// Don't supply range requests for 0 length objects as they always fail
 		delete(req.Header, "Range")
 	}
+	o.addResourceKey(req.Header)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		res, err = o.fs.client.Do(req)
 		if err == nil {
@@ -422,11 +422,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	require.NoError(t, err)
 	o := obj.(*Object)
 
-	dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(dir)
-	}()
+	dir := t.TempDir()
 
 	checkFile := func(name string) {
 		filePath := filepath.Join(dir, name)

@@ -491,19 +487,11 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	subFs, isDriveFs := subFsResult.(*Fs)
 	require.True(t, isDriveFs)
 
-	tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(tempDir1)
-	}()
+	tempDir1 := t.TempDir()
 	tempFs1, err := fs.NewFs(defCtx, tempDir1)
 	require.NoError(t, err)
 
-	tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(tempDir2)
-	}()
+	tempDir2 := t.TempDir()
 	tempFs2, err := fs.NewFs(defCtx, tempDir2)
 	require.NoError(t, err)
@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
 }
 
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
+func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
 	var arg = &files.UploadSessionFinishBatchArg{
 		Entries: items,
 	}
 	err = b.f.pacer.Call(func() (bool, error) {
-		batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
+		complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
 		// If error is insufficient space then don't retry
 		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
 			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {

@@ -137,7 +137,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
 	if err != nil {
 		return nil, fmt.Errorf("batch commit failed: %w", err)
 	}
-	return batchStatus, nil
+	return complete, nil
 }
 
 // finishBatchJobStatus waits for the batch to complete returning completed entries

@@ -199,26 +199,11 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
 	fs.Debugf(b.f, "Committing %s", desc)
 
 	// finalise the batch getting either a result or a job id to poll
-	batchStatus, err := b.finishBatch(ctx, items)
+	complete, err := b.finishBatch(ctx, items)
 	if err != nil {
 		return err
 	}
 
-	// check whether batch is complete
-	var complete *files.UploadSessionFinishBatchResult
-	switch batchStatus.Tag {
-	case "async_job_id":
-		// wait for batch to complete
-		complete, err = b.finishBatchJobStatus(ctx, batchStatus)
-		if err != nil {
-			return err
-		}
-	case "complete":
-		complete = batchStatus.Complete
-	default:
-		return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
-	}
-
 	// Check we got the right number of entries
 	entries := complete.Entries
 	if len(entries) != len(results) {
@@ -1269,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, fmt.Errorf("about failed: %w", err)
+		return nil, err
 	}
 	var total uint64
 	if q.Allocation != nil {

@@ -1370,10 +1370,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 
 	if timeout < 30 {
 		timeout = 30
+		fs.Debugf(f, "Increasing poll interval to minimum 30s")
 	}
 
 	if timeout > 480 {
 		timeout = 480
+		fs.Debugf(f, "Decreasing poll interval to maximum 480s")
 	}
 
 	err = f.pacer.Call(func() (bool, error) {

@@ -1650,13 +1652,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	}
 
 	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+	skip := int64(0)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		// seek to the start in case this is a retry
-		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
-			return false, nil
+		if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
+			return false, err
 		}
 		err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
 		// after session is started, we retry everything
+		if err != nil {
+			// Check for incorrect offset error and retry with new offset
+			if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
+				if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
+					correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
+					delta := int64(correctOffset) - int64(cursor.Offset)
+					skip += delta
+					what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
+					if skip < 0 {
+						return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
+					} else if skip == chunkSize {
+						fs.Debugf(o, "%s: chunk received OK - continuing", what)
+						return false, nil
+					} else if skip > chunkSize {
+						// This error should never happen
+						return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
+					}
+					// Skip the sent data on next retry
+					cursor.Offset = uint64(int64(cursor.Offset) + delta)
+					fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
+				}
+			}
+		}
 		return err != nil, err
 	})
 	if err != nil {
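A worked example of the offset arithmetic above, with assumed numbers: suppose a 4 MiB chunk was sent at `cursor.Offset = 0` and the retried request learns the server's correct offset.

```go
package main

import "fmt"

func main() {
	chunkSize := int64(4 << 20)             // 4 MiB chunk
	sent, correct := int64(0), int64(4<<20) // server says correct offset = 4 MiB
	skip := correct - sent                  // how much of the chunk to skip on retry
	switch {
	case skip < 0:
		fmt.Println("can't seek backwards") // server behind confirmed data: give up
	case skip == chunkSize:
		fmt.Println("chunk received OK - continuing") // whole chunk already arrived
	case skip > chunkSize:
		fmt.Println("impossible: more than a chunk ahead")
	default:
		fmt.Printf("resend the last %d bytes\n", chunkSize-skip)
	}
}
```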
@@ -1760,7 +1786,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		entry, err = o.uploadChunked(ctx, in, commitInfo, size)
 	} else {
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-			entry, err = o.fs.srv.Upload(commitInfo, in)
+			entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
 			return shouldRetry(ctx, err)
 		})
 	}
@@ -42,18 +42,15 @@ func init() {
 		}, {
 			Help:     "If you want to download a shared folder, add this parameter.",
 			Name:     "shared_folder",
-			Required: false,
 			Advanced: true,
 		}, {
 			Help:       "If you want to download a shared file that is password protected, add this parameter.",
 			Name:       "file_password",
-			Required:   false,
 			Advanced:   true,
 			IsPassword: true,
 		}, {
 			Help:       "If you want to list the files in a shared folder that is password protected, add this parameter.",
 			Name:       "folder_password",
-			Required:   false,
 			Advanced:   true,
 			IsPassword: true,
 		}, {

@@ -517,6 +514,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return dstObj, nil
 }
 
+// About gets quota information
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+	opts := rest.Opts{
+		Method:      "POST",
+		Path:        "/user/info.cgi",
+		ContentType: "application/json",
+	}
+	var accountInfo AccountInfo
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to read user info: %w", err)
+	}
+
+	// FIXME max upload size would be useful to use in Update
+	usage = &fs.Usage{
+		Used:  fs.NewUsageValue(accountInfo.ColdStorage),                                    // bytes in use
+		Total: fs.NewUsageValue(accountInfo.AvailableColdStorage),                           // bytes total
+		Free:  fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
+	}
+	return usage, nil
+}
+
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
 func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
 	o, err := f.NewObject(ctx, remote)
@@ -182,3 +182,34 @@ type FoldersList struct {
 	Status     string   `json:"Status"`
 	SubFolders []Folder `json:"sub_folders"`
 }
+
+// AccountInfo is the structure how 1Fichier returns user info
+type AccountInfo struct {
+	StatsDate               string `json:"stats_date"`
+	MailRM                  string `json:"mail_rm"`
+	DefaultQuota            int64  `json:"default_quota"`
+	UploadForbidden         string `json:"upload_forbidden"`
+	PageLimit               int    `json:"page_limit"`
+	ColdStorage             int64  `json:"cold_storage"`
+	Status                  string `json:"status"`
+	UseCDN                  string `json:"use_cdn"`
+	AvailableColdStorage    int64  `json:"available_cold_storage"`
+	DefaultPort             string `json:"default_port"`
+	DefaultDomain           int    `json:"default_domain"`
+	Email                   string `json:"email"`
+	DownloadMenu            string `json:"download_menu"`
+	FTPDID                  int    `json:"ftp_did"`
+	DefaultPortFiles        string `json:"default_port_files"`
+	FTPReport               string `json:"ftp_report"`
+	OverQuota               int64  `json:"overquota"`
+	AvailableStorage        int64  `json:"available_storage"`
+	CDN                     string `json:"cdn"`
+	Offer                   string `json:"offer"`
+	SubscriptionEnd         string `json:"subscription_end"`
+	TFA                     string `json:"2fa"`
+	AllowedColdStorage      int64  `json:"allowed_cold_storage"`
+	HotStorage              int64  `json:"hot_storage"`
+	DefaultColdStorageQuota int64  `json:"default_cold_storage_quota"`
+	FTPMode                 string `json:"ftp_mode"`
+	RUReport                string `json:"ru_report"`
+}
@@ -52,11 +52,13 @@ func init() {
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21).",
Name: "port",
Help: "FTP port number.",
Default: 21,
}, {
Name: "pass",
Help: "FTP password.",
@@ -98,6 +100,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
@@ -182,6 +189,7 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
@@ -336,6 +344,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}

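A minimal sketch of how the new UTF-8 switch composes with the other dial
options for github.com/jlaffaye/ftp; the host and opt values are hypothetical:

ftpConfig := []ftp.DialOption{ftp.DialWithContext(ctx)}
if opt.DisableMLSD {
	ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true)) // fall back to LIST instead of MLSD
}
if opt.DisableUTF8 {
	ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true)) // don't enable UTF-8 even if advertised
}
c, err := ftp.Dial("ftp.example.com:21", ftpConfig...)
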
@@ -24,6 +24,7 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/rclone/rclone/fs"
@@ -65,7 +66,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)

@@ -182,15 +183,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-northeast1",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland",
@@ -206,6 +222,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "europe-west4",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa",
@@ -221,6 +243,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west2",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -247,6 +296,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: "download_compressed",
Help: `If set this will download compressed objects as-is.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will transparently decompress these files on
download. This means that rclone can't check the hash or the size of
the file as both of these refer to the compressed object.

If this flag is set then rclone will download files with
"Content-Encoding: gzip" as they are received. This means that rclone
can check the size and hash but the file contents will be compressed.
`,
Advanced: true,
Default: false,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -269,21 +342,24 @@ type Options struct {
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
DownloadCompressed bool `config:"download_compressed"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
warnCompressed sync.Once // warn once about compressed files
}

// Object describes a storage object
@@ -297,6 +373,7 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}

// ------------------------------------------------------------
@@ -434,7 +511,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -792,6 +869,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
}, nil)
}

// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}

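A minimal sketch of the intended effect (the bucket name is hypothetical):
with no_check_bucket set, the existence check and implicit creation are
skipped entirely, saving a transaction per operation on a known-good bucket.

f.opt.NoCheckBucket = true
err := f.checkBucket(ctx, "my-existing-bucket") // always nil: no API call is made
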
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
@@ -825,7 +910,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.checkBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -909,6 +994,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"

// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -947,6 +1033,15 @@ func (o *Object) setMetaData(info *storage.Object) {
} else {
o.modTime = modTime
}

// If gunzipping then size and md5sum are unknown
if o.gzipped && !o.fs.opt.DownloadCompressed {
o.bytes = -1
o.md5sum = ""
o.fs.warnCompressed.Do(func() {
fs.Logf(o.fs, "Decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-download-compressed to override")
})
}
}

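A minimal sketch of the two resulting download modes for an object stored
with Content-Encoding: gzip, assuming o has been through setMetaData above:

if o.gzipped && !o.fs.opt.DownloadCompressed {
	// transparent decompression: size is unknown up front and the md5 is
	// unusable, since both describe the compressed bytes
	fmt.Println(o.Size()) // -1
} else if o.gzipped {
	// --gcs-download-compressed: served as stored, so size and md5 are
	// valid but the payload stays gzipped
	fmt.Println(o.Size()) // the compressed size
}
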
// readObjectInfo reads the definition for an object
@@ -1047,6 +1142,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && o.fs.opt.DownloadCompressed {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1075,7 +1179,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
}

@@ -69,7 +69,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
RedirectURL: oauthutil.RedirectURL,
}
)

@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.Replace(remote, "/", "／", -1)
remote = strings.ReplaceAll(remote, "/", "／")
err = fn(remote, item, false)
if err != nil {
return err

@@ -202,7 +202,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
hashEntries = append(hashEntries, f.wrapObject(x, nil))
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
@@ -251,7 +255,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutStream not supported")
}
@@ -261,7 +265,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}
return nil, errors.New("PutUnchecked not supported")
}
@@ -278,7 +282,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("CleanUp not supported")
return errors.New("not supported by underlying remote")
}

// About gets quota information from the Fs
@@ -286,7 +290,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("About not supported")
return nil, errors.New("not supported by underlying remote")
}

// ChangeNotify calls the passed function with a path that has had changes.
@@ -348,7 +352,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err), err
return f.wrapObject(oResult, err)
}

// Move src to this remote using server-side move operations.
@@ -371,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil), nil
return f.wrapObject(oResult, nil)
}

// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
@@ -410,7 +414,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err), err
return f.wrapObject(o, err)
}

//
@@ -424,11 +428,15 @@ type Object struct {
}

// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
if err != nil {
return nil, err
}
return &Object{Object: o, f: f}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
}

// Fs returns read only access to the Fs that this object is part of

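A minimal sketch of the new wrapObject contract (baseObj and baseErr are
hypothetical): errors now propagate instead of collapsing into a nil object,
so callers can tell a failed lookup apart from a missing object.

obj, err := f.wrapObject(baseObj, baseErr)
if err != nil {
	return nil, err // includes fs.ErrorObjectNotFound when baseObj was nil
}
return obj, nil
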
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o *Object
o fs.Object
common hash.Set
rehash bool
hashes hashMap
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o = f.wrapObject(oResult, err)
if o == nil {
o, err = f.wrapObject(oResult, err)
if err != nil {
return nil, err
}

@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
}
if len(hashes) > 0 {
err := o.putHashes(ctx, hashes)
err := o.(*Object).putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err

@@ -22,9 +22,8 @@ func init() {
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
Required: false,
Name: "username",
Help: "Hadoop user name.",
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root.",
@@ -36,7 +35,6 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Required: false,
Advanced: true,
}, {
Name: "data_transfer_protection",
@@ -46,7 +44,6 @@ Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

@@ -52,8 +52,7 @@ The input format is comma separated list of key,value pairs. Standard

For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
@@ -74,8 +73,9 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing.
Help: `Don't use HEAD requests.

HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -84,12 +84,9 @@ directory listing to:
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
Default: false,
Advanced: true,
}},
@@ -133,11 +130,87 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
return fmt.Errorf("HTTP Error: %s", res.Status)
}
return nil
}

// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}

// If url does not end with '/' we send a HEAD request to decide
// if it is directory or file, and if directory appends the missing
// '/', or if file returns the directory url to parent instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}

// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}

// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)

if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
|
||||
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}

fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}

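A sketch of the resulting decision table (the URLs are hypothetical; the
boolean reports whether the path was judged to be a file):

// getFsEndpoint(ctx, client, "http://host/dir/", opt) -> "http://host/dir/", false (no HEAD needed)
// getFsEndpoint(ctx, client, "http://host/file", opt) -> "http://host/", true (HEAD returned 2xx)
// getFsEndpoint(ctx, client, "http://host/dir", opt)  -> "http://host/dir/", false (HEAD 404, or redirect to ".../dir/")
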
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -168,37 +241,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

client := fshttp.NewClient(ctx)

var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}

newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}

u, err = url.Parse(newRoot)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
@@ -216,12 +261,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)

if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}

if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}

return f, nil
}

@@ -297,7 +346,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
if strings.Contains(uStr, "?") {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -409,7 +458,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, fmt.Errorf("Can't parse content type %q", contentType)
return nil, fmt.Errorf("can't parse content type %q", contentType)
}
return names, nil
}

@@ -8,8 +8,10 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -24,10 +26,11 @@ import (
)

var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
)

// prepareServer prepares the test server and returns a function to tidy it up afterwards
@@ -35,6 +38,22 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))

// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}

// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -91,7 +110,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(6), e.Size())
assert.Equal(t, int64(5+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)

@@ -108,7 +127,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
assert.Equal(t, int64(40+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
@@ -141,7 +160,7 @@ func TestListSubDir(t *testing.T) {

e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -154,7 +173,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)

assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(9), o.Size())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)

@@ -187,7 +206,11 @@ func TestOpen(t *testing.T) {
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}

// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -236,7 +259,7 @@ func TestIsAFileSubDir(t *testing.T) {

e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
assert.Equal(t, int64(8+lineEndSize), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -353,3 +376,106 @@ func TestParseCaddy(t *testing.T) {
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}

func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that do not end with '/', the logic that
// decides if url is to be considered a file or directory, based
// on result from a HEAD request.

// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})

// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()

// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}

// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},

// 3xx redirection: status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/302", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/303", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
|
||||
{"redirect/file/307", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/308", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
// 4xx client errors
|
||||
{"parent/403", true}, // Forbidden status (head request blocked)
|
||||
{"parent/404", false}, // Not found status
|
||||
} {
|
||||
for _, noHead := range []bool{false, true} {
|
||||
var isFile bool
|
||||
if noHead {
|
||||
m.Set("no_head", "true")
|
||||
isFile = true
|
||||
} else {
|
||||
m.Set("no_head", "false")
|
||||
isFile = test.isFile
|
||||
}
|
||||
headCount = 0
|
||||
f, err := NewFs(context.Background(), remoteName, test.root, m)
|
||||
if noHead {
|
||||
assert.Equal(t, 0, headCount)
|
||||
} else {
|
||||
assert.Equal(t, 1, headCount)
|
||||
}
|
||||
if isFile {
|
||||
assert.ErrorIs(t, err, fs.ErrorIsFile)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
var endpoint string
|
||||
if isFile {
|
||||
parent, _ := path.Split(test.root)
|
||||
endpoint = "/" + parent
|
||||
} else {
|
||||
endpoint = "/" + test.root + "/"
|
||||
}
|
||||
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
|
||||
assert.Equal(t, ts.URL+endpoint, f.String(), what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,7 +119,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
|
||||
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
1109 backend/internetarchive/internetarchive.go (new file; diff suppressed because it is too large)
17 backend/internetarchive/internetarchive_test.go (new file)
@@ -0,0 +1,17 @@
// Test internetarchive filesystem interface
package internetarchive_test

import (
"testing"

"github.com/rclone/rclone/backend/internetarchive"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestIA:lesmi-rclone-test/",
NilObject: (*internetarchive.Object)(nil),
})
}

@@ -8,42 +8,69 @@ import (
)

const (
// default time format for almost all request and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
// default time format historically used for all request and responses.
// Similar to time.RFC3339, but with an extra '-' in front of 'T',
// and no ':' separator in timezone offset. Some newer endpoints have
// moved to proper time.RFC3339 conformant format instead.
jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
)

// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// unmarshalXML turns XML into a Time
func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
return time.Time{}, err
}
if v == "" {
*t = Time(time.Time{})
return nil
return time.Time{}, nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
return newTime, nil
}
return time.Time{}, err
}

// JottaTime represents time values in the classic API using a custom RFC3339 like format
type JottaTime time.Time

// String returns JottaTime string in Jottacloud classic format
func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }

// UnmarshalXML turns XML into a JottaTime
func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
*t = JottaTime(tm)
return err
}

// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalXML turns a JottaTime into XML
func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}

// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
type Rfc3339Time time.Time

// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// String returns Rfc3339Time string in Jottacloud RFC3339 format
func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }

// UnmarshalXML turns XML into a Rfc3339Time
func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, time.RFC3339)
*t = Rfc3339Time(tm)
return err
}

// MarshalXML turns a Rfc3339Time into XML
func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}

// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}

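A minimal sketch of the two wire formats side by side; the timestamps are
made up but encode the same instant:

t1, _ := time.Parse(jottaTimeFormat, "2022-01-02-T15:04:05+0100") // classic JottaTime
t2, _ := time.Parse(time.RFC3339, "2022-01-02T15:04:05+01:00")    // newer Rfc3339Time
fmt.Println(t1.Equal(t2)) // true
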
// LoginToken is a struct representing the login token generated in the WebUI
type LoginToken struct {
@@ -122,16 +149,11 @@ type AllocateFileResponse struct {

// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
Path string `json:"path"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Modified int64 `json:"modified"`
}

// DeviceRegistrationResponse is the response to registering a device
@@ -338,9 +360,9 @@ type JottaFolder struct {
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
CreatedAt JottaTime `xml:"created"`
ModifiedAt JottaTime `xml:"modified"`
Updated JottaTime `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
@@ -365,17 +387,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"`
Updated JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}

// Error is a custom Error for wrapping Jottacloud error responses

@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -190,7 +191,7 @@ machines.`)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
@@ -518,7 +519,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
values.Set("client_id", defaultClientID)
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("scope", "openid offline_access")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
@@ -648,7 +649,7 @@ func errorHandler(resp *http.Response) error {

// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
}

// filePathRaw returns an unescaped file path (f.root, file)
@@ -931,49 +932,106 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}

// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {

// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
startPathLength := len(startPath)
for i := range startFolder.Folders {
folder := &startFolder.Folders[i]
if !f.validFolder(folder) {
return nil
type stats struct {
Folders int `xml:"folders"`
Files int `xml:"files"`
}
var expected, actual stats

type xmlFile struct {
Path string `xml:"path"`
Name string `xml:"filename"`
Checksum string `xml:"md5"`
Size int64 `xml:"size"`
Modified api.Rfc3339Time `xml:"modified"` // Note: Liststream response includes 3 decimal milliseconds, but we ignore them since there is second precision everywhere else
Created api.Rfc3339Time `xml:"created"`
}

type xmlFolder struct {
Path string `xml:"path"`
}

addFolder := func(path string) error {
return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
}

addFile := func(f *xmlFile) error {
return callback(&Object{
hasMetaData: true,
fs: filesystem,
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
modTime: time.Time(f.Modified),
})
}

// liststream paths are /mountpoint/root/path
// so the returned paths should have /mountpoint/root/ trimmed
// as the caller is expecting path.
pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
trimPathPrefix := func(p string) string {
p = strings.TrimPrefix(p, pathPrefix)
p = strings.TrimPrefix(p, "/")
return p
}

uniqueFolders := map[string]bool{}
decoder := xml.NewDecoder(r)

for {
t, err := decoder.Token()
if err != nil {
if err != io.EOF {
return err
}
break
}
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
remoteDir = folderPath[pathPrefixLength+1:]
if folderPathLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {
return err
}
}
}
for i := range folder.Files {
file := &folder.Files[i]
if f.validFile(file) {
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
}
err = fn(o)
if err != nil {
switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "file":
var f xmlFile
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
actual.Files++
if !uniqueFolders[f.Path] {
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
}
if err := addFile(&f); err != nil {
return err
}
case "folder":
var f xmlFolder
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
case "stats":
if err := decoder.DecodeElement(&expected, &se); err != nil {
return err
}
}
}
}

if expected.Folders != actual.Folders ||
expected.Files != actual.Files {
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
|
||||
}
return nil
}

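A hypothetical fragment of the liststream response the parser above consumes;
the element names match the decoder cases, all values are made up:

// <file><path>/Jotta/Archive/dir</path><filename>a.txt</filename>
//   <md5>d41d8cd98f00b204e9800998ecf8427e</md5><size>0</size>
//   <modified>2022-01-02T15:04:05.000Z</modified></file>
// <folder><path>/Jotta/Archive/dir/sub</path></folder>
// <stats><folders>2</folders><files>1</files></stats>
//
// The trailing <stats> element is what lets the client verify it saw every entry.
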
@@ -988,12 +1046,23 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Path: f.filePath(dir),
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "list")
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)

var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
}

err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
if d.Remote() == dir {
return nil
}
return list.Add(d)
})
_ = resp.Body.Close()
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1005,10 +1074,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}
@@ -1126,6 +1191,45 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}

// createOrUpdate tries to make remote file match without uploading.
// If the remote file exists, and has matching size and md5, only
// timestamps are updated. If the file does not exist or does
// not match size and md5, but matching content can be constructed
// from deduplication, the file will be updated/created. If the file
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: f.filePath(file),
Parameters: url.Values{},
ExtraHeaders: make(map[string]string),
}

opts.Parameters.Set("cphash", "true")

fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = fileDate

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
})

if apiErr, ok := err.(*api.Error); ok {
// does not exist, i.e. not matching size and md5, and not possible to make it by deduplication
if apiErr.StatusCode == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
return info, nil
}

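A minimal sketch of the caller pattern (SetModTime below uses exactly this
shape); remote, modTime, size and md5 are hypothetical values:

info, err := f.createOrUpdate(ctx, remote, modTime, size, md5)
if err == fs.ErrorObjectNotFound {
	// no matching file could be constructed server-side, so a real upload is needed
}
_ = info
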
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
@@ -1169,6 +1273,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
}

if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
@@ -1470,40 +1580,19 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return err
}

// prepare allocate request with existing metadata but changed timestamps
var resp *http.Response
var options []fs.OpenOption
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(modTime).APIString()
var request = api.AllocateFileRequest{
Bytes: o.size,
Created: fileDate,
Modified: fileDate,
Md5: o.md5,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}

// send it
var response api.AllocateFileResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
})
// request check/update with existing metadata and new modtime
// (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
if err != nil {
if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
return errors.New("metadata did not match")
}
return err
}

// check response
if response.State != "COMPLETED" {
// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
return errors.New("metadata did not match")
}

// update local metadata
o.modTime = modTime
return nil
@@ -1641,7 +1730,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()

// the allocate request
var request = api.AllocateFileRequest{

@@ -28,33 +28,57 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Default: "https://app.koofr.net",
Provider: "other",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name.",
Help: "Your user name.",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
IsPassword: true,
Required: true,
}, {
@@ -71,6 +95,7 @@ func init() {

// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
@@ -255,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}

// NewFs constructs a new filesystem given a root path and rclone configuration options
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setProviderDefaults(opt)
|
||||
return NewFsFromOptions(ctx, name, root, opt)
|
||||
}
|
||||
|
||||
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
|
||||
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
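
setProviderDefaults above keeps old, provider-less configs working: the provider is inferred from the endpoint, and for the known providers the endpoint is then pinned. A sketch of how a legacy Digi Storage config resolves, written as an Example test in the same package (assuming the function exactly as shown):

func Example_setProviderDefaults() {
    // Legacy config: endpoint present, provider missing.
    opt := &Options{Endpoint: "https://storage.rcs-rds.ro"}
    setProviderDefaults(opt)
    fmt.Println(opt.Provider, opt.Endpoint)
    // Output: digistorage https://storage.rcs-rds.ro
}
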
@@ -1133,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return err
    }

    // Wipe hashes before update
    o.clearHashCache()

    var symlinkData bytes.Buffer
    // If the object is a regular file, create it.
    // If it is a translated link, just read in the contents, and
@@ -1295,6 +1298,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
    }
}

// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
    o.fs.objectMetaMu.Lock()
    o.hashes = nil
    o.fs.objectMetaMu.Unlock()
}

// Stat an Object into info
func (o *Object) lstat() error {
    info, err := o.fs.lstat(o.path)
@@ -1306,6 +1316,7 @@ func (o *Object) lstat() error {

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    o.clearHashCache()
    return remove(o.path)
}

@@ -1,6 +1,7 @@
package local

import (
    "bytes"
    "context"
    "io/ioutil"
    "os"
@@ -12,6 +13,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/lib/file"
    "github.com/rclone/rclone/lib/readers"
@@ -166,3 +168,64 @@ func TestSymlinkError(t *testing.T) {
    _, err := NewFs(context.Background(), "local", "/", m)
    assert.Equal(t, errLinksAndCopyLinks, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    defer r.Finalise()
    const filePath = "file.txt"
    when := time.Now()
    r.WriteFile(filePath, "content", when)
    f := r.Flocal.(*Fs)

    // Get the object
    o, err := f.NewObject(ctx, filePath)
    require.NoError(t, err)

    // Test the hash is as we expect
    md5, err := o.Hash(ctx, hash.MD5)
    require.NoError(t, err)
    assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

    // Reupload it with different contents but same size and timestamp
    var b = bytes.NewBufferString("CONTENT")
    src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
    err = o.Update(ctx, b, src)
    require.NoError(t, err)

    // Check the hash is as expected
    md5, err = o.Hash(ctx, hash.MD5)
    require.NoError(t, err)
    assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}

// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    defer r.Finalise()
    const filePath = "file.txt"
    when := time.Now()
    r.WriteFile(filePath, "content", when)
    f := r.Flocal.(*Fs)

    // Get the object
    o, err := f.NewObject(ctx, filePath)
    require.NoError(t, err)

    // Test the hash is as we expect
    md5, err := o.Hash(ctx, hash.MD5)
    require.NoError(t, err)
    assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

    // Delete the object
    require.NoError(t, o.Remove(ctx))

    // Test the hash cache is empty
    require.Nil(t, o.(*Object).hashes)

    // Test the hash returns an error
    _, err = o.Hash(ctx, hash.MD5)
    require.Error(t, err)
}

@@ -58,7 +58,7 @@ type UserInfoResponse struct {
    AutoProlong bool  `json:"auto_prolong"`
    Basequota   int64 `json:"basequota"`
    Enabled     bool  `json:"enabled"`
    Expires     int   `json:"expires"`
    Expires     int64 `json:"expires"`
    Prolong     bool  `json:"prolong"`
    Promocodes  struct {
    } `json:"promocodes"`
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
    FileSizeLimit int64 `json:"file_size_limit"`
    Space         struct {
        BytesTotal int64 `json:"bytes_total"`
        BytesUsed  int   `json:"bytes_used"`
        BytesUsed  int64 `json:"bytes_used"`
        Overquota  bool  `json:"overquota"`
    } `json:"space"`
} `json:"cloud"`

@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    }

    total := info.Body.Cloud.Space.BytesTotal
    used := int64(info.Body.Cloud.Space.BytesUsed)
    used := info.Body.Cloud.Space.BytesUsed

    usage := &fs.Usage{
        Total: fs.NewUsageValue(total),
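
The int → int64 changes above matter because Go's int is only 32 bits on 32-bit platforms, so quota and usage fields measured in bytes silently overflow past ~2 GiB. A small standalone illustration:

package main

import "fmt"

func main() {
    used := int64(5) << 30   // 5 GiB as an int64 byte count
    fmt.Println(used)        // 5368709120
    fmt.Println(int32(used)) // 1073741824 — silently wrong in 32 bits
}
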
backend/netstorage/netstorage.go — 1277 lines, new executable file (file diff suppressed because it is too large)

backend/netstorage/netstorage_test.go — 16 lines, new file:
@@ -0,0 +1,16 @@
package netstorage_test

import (
    "testing"

    "github.com/rclone/rclone/backend/netstorage"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestnStorage:",
        NilObject:  (*netstorage.Object)(nil),
    })
}
@@ -65,9 +65,12 @@ var (
    authPath  = "/common/oauth2/v2.0/authorize"
    tokenPath = "/common/oauth2/v2.0/token"

    scopesWithSitePermission    = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
    scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

    // Description of how to auth for this app for a business account
    oauthConfig = &oauth2.Config{
        Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
        Scopes:       scopesWithSitePermission,
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -137,6 +140,26 @@ Note that the chunks will be buffered into memory.`,
        Help:     "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
        Default:  "",
        Advanced: true,
    }, {
        Name: "root_folder_id",
        Help: `ID of the root folder.

This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
        Advanced: true,
    }, {
        Name: "disable_site_permission",
        Help: `Disable the request for Sites.Read.All permission.

If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization disallows users to consent app permission
request on their own.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "expose_onenote_files",
        Help: `Set to make OneNote files show up in directory listings.
@@ -278,6 +301,10 @@ type siteResource struct {
type siteResponse struct {
    Sites []siteResource `json:"value"`
}
type deltaResponse struct {
    DeltaLink string     `json:"@odata.deltaLink"`
    Value     []api.Item `json:"value"`
}

// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
@@ -374,6 +401,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
    region, graphURL := getRegionURL(m)

    if config.State == "" {
        disableSitePermission, _ := m.Get("disable_site_permission")
        if disableSitePermission == "true" {
            oauthConfig.Scopes = scopesWithoutSitePermission
        } else {
            oauthConfig.Scopes = scopesWithSitePermission
        }
        oauthConfig.Endpoint = oauth2.Endpoint{
            AuthURL:  authEndpoint[region] + authPath,
            TokenURL: authEndpoint[region] + tokenPath,
@@ -527,6 +560,8 @@ type Options struct {
    ChunkSize               fs.SizeSuffix `config:"chunk_size"`
    DriveID                 string        `config:"drive_id"`
    DriveType               string        `config:"drive_type"`
    RootFolderID            string        `config:"root_folder_id"`
    DisableSitePermission   bool          `config:"disable_site_permission"`
    ExposeOneNoteFiles      bool          `config:"expose_onenote_files"`
    ServerSideAcrossConfigs bool          `config:"server_side_across_configs"`
    ListChunk               int64         `config:"list_chunk"`
@@ -618,6 +653,12 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
    retry := false
    if resp != nil {
        switch resp.StatusCode {
        case 400:
            if apiErr, ok := err.(*api.Error); ok {
                if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
                    return false, fserrors.NoRetryError(err)
                }
            }
        case 401:
            if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
                retry = true
@@ -789,6 +830,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }

    rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
    if opt.DisableSitePermission {
        oauthConfig.Scopes = scopesWithoutSitePermission
    } else {
        oauthConfig.Scopes = scopesWithSitePermission
    }
    oauthConfig.Endpoint = oauth2.Endpoint{
        AuthURL:  authEndpoint[opt.Region] + authPath,
        TokenURL: authEndpoint[opt.Region] + tokenPath,
@@ -826,15 +872,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    })

    // Get rootID
    rootInfo, _, err := f.readMetaDataForPath(ctx, "")
    if err != nil {
        return nil, fmt.Errorf("failed to get root: %w", err)
    var rootID = opt.RootFolderID
    if rootID == "" {
        rootInfo, _, err := f.readMetaDataForPath(ctx, "")
        if err != nil {
            return nil, fmt.Errorf("failed to get root: %w", err)
        }
        rootID = rootInfo.GetID()
    }
    if rootInfo.GetID() == "" {
    if rootID == "" {
        return nil, errors.New("failed to get root: ID was empty")
    }

    f.dirCache = dircache.New(root, rootInfo.GetID(), f)
    f.dirCache = dircache.New(root, rootID, f)

    // Find the current root
    err = f.dirCache.FindRoot(ctx, false)
@@ -842,7 +892,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
        tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
        tempF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(ctx, false)
@@ -1456,7 +1506,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("about failed: %w", err)
        return nil, err
    }
    q := drive.Quota
    // On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
@@ -2256,6 +2306,142 @@ func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
    return canonicalDriveID
}

// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
//
// The Onedrive implementation gives the whole hierarchy up to the top when
// an object is changed. For instance, if a/b/c is changed, this function
// will call notifyFunc with a, a/b and a/b/c.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
    go func() {
        // get the StartPageToken early so all changes from now on get processed
        nextDeltaToken, err := f.changeNotifyStartPageToken(ctx)
        if err != nil {
            fs.Errorf(f, "Could not get first deltaLink: %s", err)
            return
        }

        fs.Debugf(f, "Next delta token is: %s", nextDeltaToken)

        var ticker *time.Ticker
        var tickerC <-chan time.Time
        for {
            select {
            case pollInterval, ok := <-pollIntervalChan:
                if !ok {
                    if ticker != nil {
                        ticker.Stop()
                    }
                    return
                }
                if ticker != nil {
                    ticker.Stop()
                    ticker, tickerC = nil, nil
                }
                if pollInterval != 0 {
                    ticker = time.NewTicker(pollInterval)
                    tickerC = ticker.C
                }
            case <-tickerC:
                fs.Debugf(f, "Checking for changes on remote")
                nextDeltaToken, err = f.changeNotifyRunner(ctx, notifyFunc, nextDeltaToken)
                if err != nil {
                    fs.Infof(f, "Change notify listener failure: %s", err)
                }
            }
        }
    }()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken string, err error) {
    delta, err := f.changeNotifyNextChange(ctx, "latest")
    parsedURL, err := url.Parse(delta.DeltaLink)
    if err != nil {
        return
    }
    nextDeltaToken = parsedURL.Query().Get("token")
    return
}

func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
    opts := f.buildDriveDeltaOpts(token)

    _, err = f.srv.CallJSON(ctx, &opts, nil, &delta)

    return
}

func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
    rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"

    return rest.Opts{
        Method:     "GET",
        RootURL:    rootURL,
        Path:       "/" + f.driveID + "/root/delta",
        Parameters: map[string][]string{"token": {token}},
    }
}

func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
    delta, err := f.changeNotifyNextChange(ctx, deltaToken)
    parsedURL, err := url.Parse(delta.DeltaLink)
    if err != nil {
        return
    }
    nextDeltaToken = parsedURL.Query().Get("token")

    for _, item := range delta.Value {
        isDriveRootFolder := item.GetParentReference().ID == ""
        if isDriveRootFolder {
            continue
        }

        fullPath, err := getItemFullPath(&item)
        if err != nil {
            fs.Errorf(f, "Could not get item full path: %s", err)
            continue
        }

        if fullPath == f.root {
            continue
        }

        relName, insideRoot := getRelativePathInsideBase(f.root, fullPath)
        if !insideRoot {
            continue
        }

        if item.GetFile() != nil {
            notifyFunc(relName, fs.EntryObject)
        } else if item.GetFolder() != nil {
            notifyFunc(relName, fs.EntryDirectory)
        }
    }

    return
}

func getItemFullPath(item *api.Item) (fullPath string, err error) {
    err = nil
    fullPath = item.GetName()
    if parent := item.GetParentReference(); parent != nil && parent.Path != "" {
        pathParts := strings.SplitN(parent.Path, ":", 2)
        if len(pathParts) != 2 {
            err = fmt.Errorf("invalid parent path: %s", parent.Path)
            return
        }

        if pathParts[1] != "" {
            fullPath = strings.TrimPrefix(pathParts[1], "/") + "/" + fullPath
        }
    }
    return
}

// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
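
ChangeNotify above is driven entirely by the poll-interval channel: sending a non-zero duration (re)starts the ticker, sending zero pauses it, and closing the channel stops the goroutine. A sketch of driving it by hand (normally rclone's VFS layer does this; the interval is illustrative):

pollChan := make(chan time.Duration)
f.ChangeNotify(ctx, func(name string, entryType fs.EntryType) {
    fmt.Println("changed:", name, entryType)
}, pollChan)
pollChan <- 30 * time.Second // start polling every 30s
// ... later:
close(pollChan) // stop being notified
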
@@ -136,7 +136,8 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
func (q *quickXorHash) checkSum() (h [Size]byte) {
    // Output the data as little endian bytes
    ph := 0
    for _, d := range q.data[:len(q.data)-1] {
    for i := 0; i < len(q.data)-1; i++ {
        d := q.data[i]
        _ = h[ph+7] // bounds check
        h[ph+0] = byte(d >> (8 * 0))
        h[ph+1] = byte(d >> (8 * 1))
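
The rewritten loop above still emits each uint64 of state as little-endian bytes; the manual shifts are equivalent to the stdlib helper, as this standalone check shows:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    d := uint64(0x1122334455667788)
    var manual, stdlib [8]byte
    for i := 0; i < 8; i++ {
        manual[i] = byte(d >> (8 * i)) // same shifts as checkSum
    }
    binary.LittleEndian.PutUint64(stdlib[:], d)
    fmt.Println(manual == stdlib) // true
}
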
@@ -2,8 +2,6 @@
// object storage system.
package pcloud

// FIXME implement ListR? /listfolder can do recursive lists

// FIXME cleanup returns login required?

// FIXME mime type? Fix overview if implement.
@@ -27,6 +25,7 @@ import (
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/oauthutil"
@@ -246,7 +245,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
        return nil, err
    }

    found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
    found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
        if item.Name == leaf {
            info = item
            return true
@@ -380,7 +379,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
    // Find the leaf in pathID
    found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
    found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
        if item.Name == leaf {
            pathIDOut = item.ID
            return true
@@ -446,14 +445,16 @@ type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
    opts := rest.Opts{
        Method:     "GET",
        Path:       "/listfolder",
        Parameters: url.Values{},
    }
    if recursive {
        opts.Parameters.Set("recursive", "1")
    }
    opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
    // FIXME can do recursive

    var result api.ItemResult
    var resp *http.Response
@@ -465,26 +466,71 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
    if err != nil {
        return found, fmt.Errorf("couldn't list files: %w", err)
    }
    for i := range result.Metadata.Contents {
        item := &result.Metadata.Contents[i]
        if item.IsFolder {
            if filesOnly {
                continue
    var recursiveContents func(is []api.Item, path string)
    recursiveContents = func(is []api.Item, path string) {
        for i := range is {
            item := &is[i]
            if item.IsFolder {
                if filesOnly {
                    continue
                }
            } else {
                if directoriesOnly {
                    continue
                }
            }
        } else {
            if directoriesOnly {
                continue
            item.Name = path + f.opt.Enc.ToStandardName(item.Name)
            if fn(item) {
                found = true
                break
            }
            if recursive {
                recursiveContents(item.Contents, item.Name+"/")
            }
        }
        item.Name = f.opt.Enc.ToStandardName(item.Name)
        if fn(item) {
            found = true
            break
        }
    }
    recursiveContents(result.Metadata.Contents, "")
    return
}

// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return err
    }
    var iErr error
    _, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
        remote := path.Join(dir, info.Name)
        if info.IsFolder {
            // cache the directory ID for later lookups
            f.dirCache.Put(remote, info.ID)
            d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
            // FIXME more info from dir?
            iErr = callback(d)
        } else {
            o, err := f.newObjectWithInfo(ctx, remote, info)
            if err != nil {
                iErr = err
                return true
            }
            iErr = callback(o)
        }
        if iErr != nil {
            return true
        }
        return false
    })
    if err != nil {
        return err
    }
    if iErr != nil {
        return iErr
    }
    return nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -495,36 +541,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err
    }
    var iErr error
    _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
        remote := path.Join(dir, info.Name)
        if info.IsFolder {
            // cache the directory ID for later lookups
            f.dirCache.Put(remote, info.ID)
            d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
            // FIXME more info from dir?
            entries = append(entries, d)
        } else {
            o, err := f.newObjectWithInfo(ctx, remote, info)
            if err != nil {
                iErr = err
                return true
            }
            entries = append(entries, o)
        }
        return false
    err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
        entries = append(entries, o)
        return nil
    })
    return entries, err
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    list := walk.NewListRHelper(callback)
    err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
        return list.Add(o)
    })
    if err != nil {
        return nil, err
        return err
    }
    if iErr != nil {
        return nil, iErr
    }
    return entries, nil
    return list.Flush()
}

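
With listHelper in place, List and ListR share a single traversal; ListR asks pcloud's /listfolder for the whole subtree in one request and batches entries through walk.NewListRHelper. The general shape, restated in isolation (a sketch of the pattern above, not a new API):

func listRSketch(ctx context.Context, f *Fs, dir string, callback fs.ListRCallback) error {
    list := walk.NewListRHelper(callback) // batches entries for the callback
    if err := f.listHelper(ctx, dir, true, list.Add); err != nil {
        return err
    }
    return list.Flush() // deliver any remaining batched entries
}
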
// Creates from the parameters passed in a half finished Object which
@@ -656,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
    opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
    opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
    opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
    opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
    var resp *http.Response
    var result api.ItemResult
    err = f.pacer.Call(func() (bool, error) {
@@ -872,7 +906,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("about failed: %w", err)
        return nil, err
    }
    usage = &fs.Usage{
        Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
@@ -1137,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    opts.Parameters.Set("filename", leaf)
    opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
    opts.Parameters.Set("nopartial", "1")
    opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
    opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))

    // Special treatment for a 0 length upload. This doesn't work
    // with PUT even with Content-Length set (by setting

@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("CreateDir http: %w", err)
        return nil, err
    }
    if err = info.AsErr(); err != nil {
        return nil, fmt.Errorf("CreateDir: %w", err)
        return nil, err
    }
    usage = &fs.Usage{
        Used: fs.NewUsageValue(int64(info.SpaceUsed)),

@@ -4,16 +4,21 @@ import (
    "context"
    "fmt"
    "net/http"
    "strconv"
    "time"

    "github.com/putdotio/go-putio/putio"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/pacer"
)

func checkStatusCode(resp *http.Response, expected int) error {
    if resp.StatusCode != expected {
        return &statusCodeError{response: resp}
func checkStatusCode(resp *http.Response, expected ...int) error {
    for _, code := range expected {
        if resp.StatusCode == code {
            return nil
        }
    }
    return nil
    return &statusCodeError{response: resp}
}

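
The variadic form lets callers accept any of several success codes in a single call — the Open hunk further down uses it for ranged downloads, e.g.:

if err := checkStatusCode(resp, 200, 206); err != nil {
    return shouldRetry(ctx, err) // non-matching codes become a *statusCodeError
}
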
type statusCodeError struct {
@@ -24,8 +29,10 @@ func (e *statusCodeError) Error() string {
    return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}

// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
func (e *statusCodeError) Temporary() bool {
    return e.response.StatusCode == 429 || e.response.StatusCode >= 500
    return e.response.StatusCode >= 500
}

// shouldRetry returns a boolean as to whether this err deserves to be
@@ -40,6 +47,16 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
    if perr, ok := err.(*putio.ErrorResponse); ok {
        err = &statusCodeError{response: perr.Response}
    }
    if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
        delay := defaultRateLimitSleep
        header := scerr.response.Header.Get("x-ratelimit-reset")
        if header != "" {
            if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
                delay = time.Until(time.Unix(resetTime+1, 0))
            }
        }
        return true, pacer.RetryAfterError(scerr, delay)
    }
    if fserrors.ShouldRetry(err) {
        return true, err
    }

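
The 429 branch above turns put.io's x-ratelimit-reset header (a Unix timestamp) into a pacer delay, sleeping until one second past the reset instead of a flat minute. A worked example with an illustrative header value:

header := "1700000042"         // x-ratelimit-reset: Unix seconds
delay := defaultRateLimitSleep // 60s fallback if the header is absent or unparseable
if resetTime, err := strconv.ParseInt(header, 10, 64); err == nil {
    delay = time.Until(time.Unix(resetTime+1, 0))
}
_ = delay // handed to pacer.RetryAfterError above
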
@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
    if err != nil {
        return false, err
    }
    if resp.StatusCode != 201 {
        return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
    if err := checkStatusCode(resp, 201); err != nil {
        return shouldRetry(ctx, err)
    }
    location = resp.Header.Get("location")
    if location == "" {
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, fmt.Errorf("about failed: %w", err)
        return nil, err
    }
    return &fs.Usage{
        Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used

@@ -241,7 +241,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        }
        // fs.Debugf(o, "opening file: id=%d", o.file.ID)
        resp, err = o.fs.httpClient.Do(req)
        return shouldRetry(ctx, err)
        if err != nil {
            return shouldRetry(ctx, err)
        }
        if err := checkStatusCode(resp, 200, 206); err != nil {
            return shouldRetry(ctx, err)
        }
        return false, nil
    })
    if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
        _ = resp.Body.Close()

@@ -33,8 +33,9 @@ const (
    rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
    minSleep                   = 10 * time.Millisecond
    maxSleep                   = 2 * time.Second
    decayConstant              = 2 // bigger for slower decay, exponential
    decayConstant              = 1 // bigger for slower decay, exponential
    defaultChunkSize           = 48 * fs.Mebi
    defaultRateLimitSleep      = 60 * time.Second
)

var (

backend/s3/s3.go — 730 lines changed (file diff suppressed because it is too large)
@@ -60,11 +60,13 @@ func init() {
    Help:     "SSH host to connect to.\n\nE.g. \"example.com\".",
    Required: true,
}, {
    Name: "user",
    Help: "SSH username, leave blank for current username, " + currentUser + ".",
    Name:    "user",
    Help:    "SSH username.",
    Default: currentUser,
}, {
    Name: "port",
    Help: "SSH port, leave blank to use default (22).",
    Name:    "port",
    Help:    "SSH port number.",
    Default: 22,
}, {
    Name: "pass",
    Help: "SSH password, leave blank to use ssh-agent.",
@@ -107,7 +109,7 @@ when the ssh-agent contains many keys.`,
    Default: false,
}, {
    Name: "use_insecure_cipher",
    Help: `Enable the use of insecure ciphers and key exchange methods.
    Help: `Enable the use of insecure ciphers and key exchange methods.

This enables the use of the following insecure ciphers and key exchange methods:

@@ -1218,7 +1220,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    }
    stdout, err := f.run(ctx, "df -k "+escapedPath)
    if err != nil {
        return nil, fmt.Errorf("your remote may not support About: %w", err)
        return nil, fmt.Errorf("your remote may not have the required df utility: %w", err)
    }

    usageTotal, usageUsed, usageAvail := parseUsage(stdout)
@@ -1309,7 +1311,7 @@ var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\
// when sending it to a shell.
func shellEscape(str string) string {
    safe := shellEscapeRegex.ReplaceAllString(str, `\$0`)
    return strings.Replace(safe, "\n", "'\n'", -1)
    return strings.ReplaceAll(safe, "\n", "'\n'")
}

// Converts a byte array from the SSH session returned by
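
shellEscape backslash-escapes every byte outside the safe character set, so paths survive the remote shell. For example, feeding it into the df call above (output shown as an assumption based on the regex):

escapedPath := shellEscape("my file.txt") // -> my\ file.txt
stdout, err := f.run(ctx, "df -k "+escapedPath)
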
@@ -1,8 +1,8 @@
//go:build !plan9
// +build !plan9

// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
// Package storj provides an interface to Storj decentralized object storage.
package storj

import (
    "context"
@@ -31,16 +31,17 @@ const (
)

var satMap = map[string]string{
    "us-central-1.tardigrade.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
    "europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
    "asia-east-1.tardigrade.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
    "us-central-1.storj.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
    "europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
    "asia-east-1.storj.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "tardigrade",
        Description: "Tardigrade Decentralized Cloud Storage",
        Name:        "storj",
        Description: "Storj Decentralized Cloud Storage",
        Aliases:     []string{"tardigrade"},
        NewFs:       NewFs,
        Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
            provider, _ := m.Get(fs.ConfigProvider)
@@ -84,10 +85,9 @@ func init() {
        },
        Options: []fs.Option{
            {
                Name:     fs.ConfigProvider,
                Help:     "Choose an authentication method.",
                Required: true,
                Default:  existingProvider,
                Name:    fs.ConfigProvider,
                Help:    "Choose an authentication method.",
                Default: existingProvider,
                Examples: []fs.OptionExample{{
                    Value: "existing",
                    Help:  "Use an existing access grant.",
@@ -99,23 +99,21 @@ func init() {
            {
                Name:     "access_grant",
                Help:     "Access grant.",
                Required: false,
                Provider: "existing",
            },
            {
                Name:     "satellite_address",
                Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
                Required: false,
                Provider: newProvider,
                Default:  "us-central-1.tardigrade.io",
                Default:  "us-central-1.storj.io",
                Examples: []fs.OptionExample{{
                    Value: "us-central-1.tardigrade.io",
                    Value: "us-central-1.storj.io",
                    Help:  "US Central 1",
                }, {
                    Value: "europe-west-1.tardigrade.io",
                    Value: "europe-west-1.storj.io",
                    Help:  "Europe West 1",
                }, {
                    Value: "asia-east-1.tardigrade.io",
                    Value: "asia-east-1.storj.io",
                    Help:  "Asia East 1",
                },
                },
@@ -123,13 +121,11 @@ func init() {
            {
                Name:     "api_key",
                Help:     "API key.",
                Required: false,
                Provider: newProvider,
            },
            {
                Name:     "passphrase",
                Help:     "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
                Required: false,
                Provider: newProvider,
            },
        },
@@ -145,7 +141,7 @@ type Options struct {
    Passphrase string `config:"passphrase"`
}

// Fs represents a remote to Tardigrade
// Fs represents a remote to Storj
type Fs struct {
    name string // the name of the remote
    root string // root of the filesystem
@@ -163,11 +159,12 @@ var (
    _ fs.Fs          = &Fs{}
    _ fs.ListRer     = &Fs{}
    _ fs.PutStreamer = &Fs{}
    _ fs.Mover       = &Fs{}
)

// NewFs creates a filesystem backed by Tardigrade.
// NewFs creates a filesystem backed by Storj.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
    // Setup filesystem and connection to Tardigrade
    // Setup filesystem and connection to Storj
    root = norm.NFC.String(root)
    root = strings.Trim(root, "/")

@@ -188,24 +185,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
    if f.opts.Access != "" {
        access, err = uplink.ParseAccess(f.opts.Access)
        if err != nil {
            return nil, fmt.Errorf("tardigrade: access: %w", err)
            return nil, fmt.Errorf("storj: access: %w", err)
        }
    }

    if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
        access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
        if err != nil {
            return nil, fmt.Errorf("tardigrade: access: %w", err)
            return nil, fmt.Errorf("storj: access: %w", err)
        }

        serializedAccess, err := access.Serialize()
        if err != nil {
            return nil, fmt.Errorf("tardigrade: access: %w", err)
            return nil, fmt.Errorf("storj: access: %w", err)
        }

        err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
        if err != nil {
            return nil, fmt.Errorf("tardigrade: access: %w", err)
            return nil, fmt.Errorf("storj: access: %w", err)
        }
    }

@@ -237,7 +234,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
    if bucketName != "" && bucketPath != "" {
        _, err = project.StatBucket(ctx, bucketName)
        if err != nil {
            return f, fmt.Errorf("tardigrade: bucket: %w", err)
            return f, fmt.Errorf("storj: bucket: %w", err)
        }

        object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -263,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
    return f, nil
}

// connect opens a connection to Tardigrade.
// connect opens a connection to Storj.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
    fs.Debugf(f, "connecting...")
    defer fs.Debugf(f, "connected: %+v", err)
@@ -274,7 +271,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {

    project, err = cfg.OpenProject(ctx, f.access)
    if err != nil {
        return nil, fmt.Errorf("tardigrade: project: %w", err)
        return nil, fmt.Errorf("storj: project: %w", err)
    }

    return
@@ -582,7 +579,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
        return nil, err
    }

    return newObjectFromUplink(f, "", upload.Info()), nil
    return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate
@@ -683,3 +680,43 @@ func newPrefix(prefix string) string {

    return prefix + "/"
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }

    // Move parameters
    srcBucket, srcKey := bucket.Split(srcObj.absolute)
    dstBucket, dstKey := f.absolute(remote)
    options := uplink.MoveObjectOptions{}

    // Do the move
    err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
    if err != nil {
        // Make sure destination bucket exists
        _, err := f.project.EnsureBucket(ctx, dstBucket)
        if err != nil {
            return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
        }
        // And try again
        err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
        if err != nil {
            return nil, fmt.Errorf("rename object failed: %w", err)
        }
    }

    // Read the new object
    return f.NewObject(ctx, remote)
}
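
Move above is optimistic: it tries the server-side move first and only creates the destination bucket (then retries once) if the first attempt fails, which saves a StatBucket round trip on the common path. The pattern in isolation, as a sketch against the same uplink calls used above:

func moveWithEnsure(ctx context.Context, project *uplink.Project,
    srcBucket, srcKey, dstBucket, dstKey string) error {
    opts := uplink.MoveObjectOptions{}
    if err := project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &opts); err == nil {
        return nil
    }
    // First attempt failed — make sure the destination bucket exists, then retry once.
    if _, err := project.EnsureBucket(ctx, dstBucket); err != nil {
        return fmt.Errorf("create destination bucket: %w", err)
    }
    return project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &opts)
}
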
@@ -1,7 +1,7 @@
//go:build !plan9
// +build !plan9

package tardigrade
package storj

import (
    "context"
@@ -18,7 +18,7 @@ import (
    "storj.io/uplink"
)

// Object describes a Tardigrade object
// Object describes a Storj object
type Object struct {
    fs *Fs

@@ -32,7 +32,7 @@ type Object struct {
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}

// newObjectFromUplink creates a new object from a Tardigrade uplink object.
// newObjectFromUplink creates a new object from a Storj uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
    // Attempt to use the modified time from the metadata. Otherwise
    // fallback to the server time.
@@ -1,20 +1,20 @@
//go:build !plan9
// +build !plan9

// Test Tardigrade filesystem interface
package tardigrade_test
// Test Storj filesystem interface
package storj_test

import (
    "testing"

    "github.com/rclone/rclone/backend/tardigrade"
    "github.com/rclone/rclone/backend/storj"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestTardigrade:",
        NilObject:  (*tardigrade.Object)(nil),
        RemoteName: "TestStorj:",
        NilObject:  (*storj.Object)(nil),
    })
}
@@ -1,4 +1,4 @@
//go:build plan9
// +build plan9

package tardigrade
package storj
@@ -754,22 +754,34 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    var containers []swift.Container
    var err error
    err = f.pacer.Call(func() (bool, error) {
        containers, err = f.c.ContainersAll(ctx, nil)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, fmt.Errorf("container listing failed: %w", err)
    }
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    var total, objects int64
    for _, c := range containers {
        total += c.Bytes
        objects += c.Count
    if f.rootContainer != "" {
        var container swift.Container
        err = f.pacer.Call(func() (bool, error) {
            container, _, err = f.c.Container(ctx, f.rootContainer)
            return shouldRetry(ctx, err)
        })
        if err != nil {
            return nil, fmt.Errorf("container info failed: %w", err)
        }
        total = container.Bytes
        objects = container.Count
    } else {
        var containers []swift.Container
        err = f.pacer.Call(func() (bool, error) {
            containers, err = f.c.ContainersAll(ctx, nil)
            return shouldRetry(ctx, err)
        })
        if err != nil {
            return nil, fmt.Errorf("container listing failed: %w", err)
        }
        for _, c := range containers {
            total += c.Bytes
            objects += c.Count
        }
    }
    usage := &fs.Usage{
    usage = &fs.Usage{
        Used:    fs.NewUsageValue(total),   // bytes in use
        Objects: fs.NewUsageValue(objects), // objects in use
    }

@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "sync"
    "time"

@@ -84,6 +85,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            err := o.Update(ctx, readers[i], src, options...)
            if err != nil {
                errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
                if len(entries) > 1 {
                    // Drain the input buffer to allow other uploads to continue
                    _, _ = io.Copy(ioutil.Discard, readers[i])
                }
            }
        } else {
            errs[i] = fs.ErrorNotAFile

@@ -6,6 +6,7 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "path"
    "path/filepath"
    "strings"
@@ -33,25 +34,21 @@ func init() {
    Help:     "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
    Required: true,
}, {
    Name:     "action_policy",
    Help:     "Policy to choose upstream on ACTION category.",
    Required: true,
    Default:  "epall",
    Name:    "action_policy",
    Help:    "Policy to choose upstream on ACTION category.",
    Default: "epall",
}, {
    Name:     "create_policy",
    Help:     "Policy to choose upstream on CREATE category.",
    Required: true,
    Default:  "epmfs",
    Name:    "create_policy",
    Help:    "Policy to choose upstream on CREATE category.",
    Default: "epmfs",
}, {
    Name:     "search_policy",
    Help:     "Policy to choose upstream on SEARCH category.",
    Required: true,
    Default:  "ff",
    Name:    "search_policy",
    Help:    "Policy to choose upstream on SEARCH category.",
    Default: "ff",
}, {
    Name:     "cache_time",
    Help:     "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
    Required: true,
    Default:  120,
    Name:    "cache_time",
    Help:    "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
    Default: 120,
}},
}
fs.Register(fsi)
@@ -144,22 +141,20 @@ func (f *Fs) Hashes() hash.Set {
    return f.hashSet
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// mkdir makes the directory passed in and returns the upstreams used
func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
    upstreams, err := f.create(ctx, dir)
    if err == fs.ErrorObjectNotFound {
        if dir != parentDir(dir) {
            if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
                return err
            }
            upstreams, err = f.create(ctx, dir)
        parent := parentDir(dir)
        if dir != parent {
            upstreams, err = f.mkdir(ctx, parent)
        } else if dir == "" {
            // If root dirs not created then create them
            upstreams, err = f.upstreams, nil
        }
    }
    if err != nil {
        return err
        return nil, err
    }
    errs := Errors(make([]error, len(upstreams)))
    multithread(len(upstreams), func(i int) {
@@ -168,7 +163,17 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
            errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
        }
    })
    return errs.Err()
    err = errs.Err()
    if err != nil {
        return nil, err
    }
    return upstreams, nil
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    _, err := f.mkdir(ctx, dir)
    return err
}

// Purge all files in the directory
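
The recursive mkdir above terminates because repeatedly taking parentDir reaches the empty string (the root), at which point all upstreams are returned and each level of the recursion creates its directory on the way back out. A sketch of the ancestor chain, assuming parentDir behaves like path.Dir with "" at the top:

func ancestors(dir string) []string {
    out := []string{dir}
    for dir != "" {
        next := path.Dir(dir)
        if next == "." || next == "/" || next == dir {
            next = ""
        }
        dir = next
        out = append(out, dir)
    }
    return out
}
// ancestors("a/b/c") -> ["a/b/c", "a/b", "a", ""]
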
@@ -452,10 +457,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
    srcPath := src.Remote()
    upstreams, err := f.create(ctx, srcPath)
    if err == fs.ErrorObjectNotFound {
        if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
            return nil, err
        }
        upstreams, err = f.create(ctx, srcPath)
        upstreams, err = f.mkdir(ctx, parentDir(srcPath))
    }
    if err != nil {
        return nil, err
@@ -490,6 +492,10 @@
    }
    if err != nil {
        errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
        if len(upstreams) > 1 {
            // Drain the input buffer to allow other uploads to continue
            _, _ = io.Copy(ioutil.Discard, readers[i])
        }
        return
    }
    objs[i] = u.WrapObject(o)

@@ -4,8 +4,6 @@ import (
    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "testing"
    "time"

@@ -20,19 +18,12 @@ import (
)

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
    for i := 1; i <= n; i++ {
        dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
        require.NoError(t, err)
        dir := t.TempDir()
        dirs = append(dirs, dir)
    }
    clean = func() {
        for _, dir := range dirs {
            err := os.RemoveAll(dir)
            assert.NoError(t, err)
        }
    }
    return dirs, clean
    return dirs
}

func (f *Fs) TestInternalReadOnly(t *testing.T) {
@@ -95,8 +86,7 @@ func TestMoveCopy(t *testing.T) {
        t.Skip("Skipping as -remote set")
    }
    ctx := context.Background()
    dirs, clean := MakeTestDirs(t, 1)
    defer clean()
    dirs := MakeTestDirs(t, 1)
    fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
    f, err := fs.NewFs(ctx, fsString)
    require.NoError(t, err)

@@ -27,8 +27,7 @@ func TestStandard(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
    name := "TestUnion"
    fstests.Run(t, &fstests.Opt{
@@ -49,8 +48,7 @@ func TestRO(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
    name := "TestUnionRO"
    fstests.Run(t, &fstests.Opt{
@@ -71,8 +69,7 @@ func TestNC(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
    name := "TestUnionNC"
    fstests.Run(t, &fstests.Opt{
@@ -93,8 +90,7 @@ func TestPolicy1(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
    name := "TestUnionPolicy1"
    fstests.Run(t, &fstests.Opt{
@@ -115,8 +111,7 @@ func TestPolicy2(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
    name := "TestUnionPolicy2"
    fstests.Run(t, &fstests.Opt{
@@ -137,8 +132,7 @@ func TestPolicy3(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    dirs, clean := union.MakeTestDirs(t, 3)
    defer clean()
    dirs := union.MakeTestDirs(t, 3)
    upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
    name := "TestUnionPolicy3"
    fstests.Run(t, &fstests.Opt{

@@ -124,6 +124,22 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
`,
    Default:  fs.CommaSepList{},
    Advanced: true,
}, {
    Name: "base_path",
    Help: `Base path of expected replies

Normally WebDAV servers return the files they are listing under the
url path as specified above. However some WebDAV servers return files
with URLs that are not under the endpoint URL. This causes rclone to
get confused and return errors like

    Item with unknown path received: "/remote.php/webdav/folder1/", "/elsewhere/remote.php/webdav/folder1/"

errors. If that is the case, then set "base_path" to the path
specified in the error message up to the first item, in the above
example "/elsewhere/remote.php/webdav/".
`,
    Advanced: true,
}},
})
}
@@ -138,6 +154,7 @@ type Options struct {
    BearerTokenCommand string               `config:"bearer_token_command"`
    Enc                encoder.MultiEncoder `config:"encoding"`
    Headers            fs.CommaSepList      `config:"headers"`
    BasePath           string               `config:"base_path"`
}

// Fs represents a remote webdav
@@ -454,7 +471,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if err != nil {
        return nil, err
    }
    f.srv.SetHeader("Referer", u.String())
    if !f.findHeader(opt.Headers, "Referer") {
        f.srv.SetHeader("Referer", u.String())
    }

    if root != "" && !rootIsDir {
        // Check to see if the root is actually an existing file
@@ -517,6 +536,17 @@ func (f *Fs) addHeaders(headers fs.CommaSepList) {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the header was configured
|
||||
func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
|
||||
for i := 0; i < len(headers); i += 2 {
|
||||
key := f.opt.Headers[i]
|
||||
if strings.EqualFold(key, find) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fetch the bearer token and set it if successful
|
||||
func (f *Fs) fetchAndSetBearerToken() error {
|
||||
if f.opt.BearerTokenCommand == "" {
|
||||
@@ -680,6 +710,10 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
 	if err != nil {
 		return false, fmt.Errorf("couldn't join URL: %w", err)
 	}
+	basePath := baseURL.Path
+	if f.opt.BasePath != "" {
+		basePath = f.opt.BasePath
+	}
 	for i := range result.Responses {
 		item := &result.Responses[i]
 		isDir := itemIsDir(item)
@@ -694,11 +728,11 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
 		if isDir {
 			u.Path = addSlash(u.Path)
 		}
-		if !strings.HasPrefix(u.Path, baseURL.Path) {
-			fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
+		if !strings.HasPrefix(u.Path, basePath) {
+			fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, basePath)
 			continue
 		}
-		subPath := u.Path[len(baseURL.Path):]
+		subPath := u.Path[len(basePath):]
 		if f.opt.Enc != encoder.EncodeZero {
 			subPath = f.opt.Enc.ToStandardPath(subPath)
 		}
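To make the effect of base_path concrete, here is a small self-contained sketch of the prefix check above; the server behaviour and paths are invented for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // The endpoint path the remote was configured with...
        endpointPath := "/remote.php/webdav/"
        // ...but this (hypothetical) server replies with items under an extra prefix.
        itemPath := "/elsewhere/remote.php/webdav/folder1/"

        // Without base_path the prefix check fails and the item is skipped.
        fmt.Println(strings.HasPrefix(itemPath, endpointPath)) // false

        // With base_path set to the prefix from the error message, the
        // check passes and the remainder becomes the rclone-visible path.
        basePath := "/elsewhere/remote.php/webdav/"
        if strings.HasPrefix(itemPath, basePath) {
            fmt.Println(itemPath[len(basePath):]) // folder1/
        }
    }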
@@ -1148,7 +1182,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, fmt.Errorf("about call failed: %w", err)
+		return nil, err
 	}
 	usage := &fs.Usage{}
 	if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {

@@ -1,5 +1,5 @@
 #!/bin/bash
 set -e
-docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
+docker build -t rclone/xgo-cgofuse https://github.com/winfsp/cgofuse.git
 docker images
 docker push rclone/xgo-cgofuse

@@ -52,6 +52,7 @@ var (
 var osarches = []string{
 	"windows/386",
 	"windows/amd64",
+	"windows/arm64",
 	"darwin/amd64",
 	"darwin/arm64",
 	"linux/386",
@@ -85,6 +86,13 @@ var archFlags = map[string][]string{
 	"arm-v7": {"GOARM=7"},
 }

+// Map Go architectures to NFPM architectures
+// Any missing are passed straight through
+var goarchToNfpm = map[string]string{
+	"arm":    "arm6",
+	"arm-v7": "arm7",
+}
+
 // runEnv - run a shell command with env
 func runEnv(args, env []string) error {
 	if *debug {
@@ -165,13 +173,17 @@ func buildZip(dir string) string {
 func buildDebAndRpm(dir, version, goarch string) []string {
 	// Make internal version number acceptable to .deb and .rpm
 	pkgVersion := version[1:]
-	pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
-	pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
+	pkgVersion = strings.ReplaceAll(pkgVersion, "β", "-beta")
+	pkgVersion = strings.ReplaceAll(pkgVersion, "-", ".")
+	nfpmArch, ok := goarchToNfpm[goarch]
+	if !ok {
+		nfpmArch = goarch
+	}

 	// Make nfpm.yaml from the template
 	substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
 		"Version": pkgVersion,
-		"Arch":    goarch,
+		"Arch":    nfpmArch,
 	})

 	// build them
@@ -253,9 +265,12 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
 		"-o",
 		sysoPath,
 	}
-	if goarch == "amd64" {
+	if strings.Contains(goarch, "64") {
 		args = append(args, "-64") // Make the syso a 64-bit coff file
 	}
+	if strings.Contains(goarch, "arm") {
+		args = append(args, "-arm") // Make the syso an arm binary
+	}
 	args = append(args, jsonPath)
 	err = runEnv(args, nil)
 	if err != nil {
@@ -377,7 +392,7 @@ func compileArch(version, goos, goarch, dir string) bool {
 	artifacts := []string{buildZip(dir)}
 	// build a .deb and .rpm if appropriate
 	if goos == "linux" {
-		artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
+		artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
 	}
 	if *copyAs != "" {
 		for _, artifact := range artifacts {
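The substring checks above are what let the new windows/arm64 target pick up both resource flags. A quick self-contained illustration of that selection logic (sketch only):

    package main

    import (
        "fmt"
        "strings"
    )

    // sysoFlags mirrors the selection above: "amd64" and "arm64" get
    // -64, "arm" and "arm64" get -arm, "386" gets neither.
    func sysoFlags(goarch string) (flags []string) {
        if strings.Contains(goarch, "64") {
            flags = append(flags, "-64")
        }
        if strings.Contains(goarch, "arm") {
            flags = append(flags, "-arm")
        }
        return flags
    }

    func main() {
        for _, arch := range []string{"386", "amd64", "arm64"} {
            fmt.Println(arch, sysoFlags(arch)) // 386 [], amd64 [-64], arm64 [-64 -arm]
        }
    }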
@@ -24,6 +24,7 @@ docs = [
     "overview.md",
     "flags.md",
     "docker.md",
+    "bisync.md",

     # Keep these alphabetical by full name
     "fichier.md",
@@ -47,11 +48,13 @@ docs = [
     "hdfs.md",
     "http.md",
     "hubic.md",
+    "internetarchive.md",
     "jottacloud.md",
     "koofr.md",
     "mailru.md",
     "mega.md",
     "memory.md",
+    "netstorage.md",
     "azureblob.md",
     "onedrive.md",
     "opendrive.md",
@@ -63,8 +66,9 @@ docs = [
     "putio.md",
     "seafile.md",
     "sftp.md",
+    "storj.md",
     "sugarsync.md",
-    "tardigrade.md",
+    "tardigrade.md",  # stub only to redirect to storj.md
     "uptobox.md",
     "union.md",
     "webdav.md",

@@ -102,7 +102,7 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
 	}
 	u, err := doAbout(context.Background())
 	if err != nil {
-		return fmt.Errorf("About call failed: %w", err)
+		return fmt.Errorf("about call failed: %w", err)
 	}
 	if u == nil {
 		return errors.New("nil usage returned")

@@ -41,7 +41,7 @@ You can discover what commands a backend implements by using

     rclone backend help <backendname>

 You can also discover information about the backend using (see
-[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
+[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
 for more info).

     rclone backend features remote:
@@ -55,7 +55,7 @@ Pass arguments to the backend by placing them on the end of the line

     rclone backend cleanup remote:path file1 file2 file3

 Note: to run these commands on a running backend, see
-[backend/command](/rc/#backend/command) in the rc docs.
+[backend/command](/rc/#backend-command) in the rc docs.
 `,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 1e6, command, args)
@@ -149,7 +149,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
 info on how to pass options and arguments.

 These can be run on a running backend using the rc command
-[backend/command](/rc/#backend/command).
+[backend/command](/rc/#backend-command).

 `, name)
 	for _, cmd := range cmds {

@@ -489,7 +489,7 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.TransferExceeded)
 	case fserrors.ShouldRetry(err):
 		os.Exit(exitcode.RetryError)
-	case fserrors.IsNoRetryError(err):
+	case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err):
 		os.Exit(exitcode.NoRetryError)
 	case fserrors.IsFatalError(err):
 		os.Exit(exitcode.FatalError)

@@ -1,7 +1,6 @@
-//go:build cmount && cgo && (linux || darwin || freebsd || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
 // +build cmount
-// +build cgo
-// +build linux darwin freebsd windows
+// +build linux,cgo darwin,cgo freebsd,cgo windows

 package cmount

@@ -13,12 +12,12 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/billziss-gh/cgofuse/fuse"
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/vfs"
+	"github.com/winfsp/cgofuse/fuse"
 )

 const fhUnset = ^uint64(0)

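The rewritten constraint relaxes the cgo requirement on Windows only, where cgofuse can drive WinFsp without cgo; the Unix-like targets still need it. A self-contained sketch of the two boolean expressions — the cmount tag and the restriction to the four listed platforms are assumed throughout:

    package main

    import "fmt"

    func main() {
        type target struct {
            goos string
            cgo  bool
        }
        // old: cmount && cgo && (linux || darwin || freebsd || windows)
        oldOK := func(t target) bool { return t.cgo }
        // new: cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
        newOK := func(t target) bool { return t.goos == "windows" || t.cgo }
        for _, tg := range []target{{"linux", true}, {"linux", false}, {"windows", true}, {"windows", false}} {
            // only windows without cgo changes from excluded to included
            fmt.Printf("%s cgo=%v old=%v new=%v\n", tg.goos, tg.cgo, oldOK(tg), newOK(tg))
        }
    }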
@@ -2,10 +2,9 @@
 //
 // This uses the cgo based cgofuse library

-//go:build cmount && cgo && (linux || darwin || freebsd || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
 // +build cmount
-// +build cgo
-// +build linux darwin freebsd windows
+// +build linux,cgo darwin,cgo freebsd,cgo windows

 package cmount

@@ -18,12 +17,12 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/billziss-gh/cgofuse/fuse"
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/vfs"
+	"github.com/winfsp/cgofuse/fuse"
 )

 func init() {
@@ -168,7 +167,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
 	host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

 	// Create options
-	options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
+	options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
 	fs.Debugf(f, "Mounting with options: %q", options)

 	// Serve the mount point in the background returning error to errChan

@@ -1,7 +1,6 @@
-//go:build cmount && cgo && (linux || darwin || freebsd || windows) && (!race || !windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)
 // +build cmount
-// +build cgo
-// +build linux darwin freebsd windows
+// +build linux,cgo darwin,cgo freebsd,cgo windows
 // +build !race !windows

 // FIXME this doesn't work with the race detector under Windows either
@@ -10,11 +9,17 @@
 package cmount

 import (
+	"runtime"
 	"testing"

+	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/vfs/vfstest"
 )

 func TestMount(t *testing.T) {
+	// Disable tests under macOS and the CI since they are locking up
+	if runtime.GOOS == "darwin" {
+		testy.SkipUnreliable(t)
+	}
 	vfstest.RunTests(t, false, mount)
 }

@@ -1,7 +1,10 @@
 // Build for cmount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build (!linux && !darwin && !freebsd && !windows) || !brew || !cgo || !cmount
-// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
+//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
+// +build !linux !cgo !cmount
+// +build !darwin !cgo !cmount
+// +build !freebsd !cgo !cmount
+// +build !windows !cmount

 package cmount

@@ -1,5 +1,5 @@
-//go:build cmount && cgo && windows
-// +build cmount,cgo,windows
+//go:build cmount && windows
+// +build cmount,windows

 package cmount

@@ -79,7 +79,7 @@ rclone.org website.`,
 		var description = map[string]string{}
 		var addDescription func(root *cobra.Command)
 		addDescription = func(root *cobra.Command) {
-			name := strings.Replace(root.CommandPath(), " ", "_", -1) + ".md"
+			name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
 			description[name] = root.Short
 			for _, c := range root.Commands() {
 				addDescription(c)
@@ -93,11 +93,11 @@ rclone.org website.`,
 			base := strings.TrimSuffix(name, path.Ext(name))
 			data := frontmatter{
 				Date:        now,
-				Title:       strings.Replace(base, "_", " ", -1),
+				Title:       strings.ReplaceAll(base, "_", " "),
 				Description: description[name],
 				Slug:        base,
 				URL:         "/commands/" + strings.ToLower(base) + "/",
-				Source:      strings.Replace(strings.Replace(base, "rclone", "cmd", -1), "_", "/", -1) + "/",
+				Source:      strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
 			}
 			var buf bytes.Buffer
 			err := frontmatterTemplate.Execute(&buf, data)
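These (and the similar hunks elsewhere in this diff) are mechanical modernizations: strings.Replace with a count of -1 and strings.ReplaceAll (added in Go 1.12) produce identical results. For example:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        base := "rclone_serve_webdav" // example value
        // The two forms below are equivalent; ReplaceAll just drops the -1 count.
        fmt.Println(strings.Replace(base, "_", " ", -1)) // rclone serve webdav
        fmt.Println(strings.ReplaceAll(base, "_", " "))  // rclone serve webdav
    }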
cmd/help.go
@@ -165,7 +165,7 @@ func runRoot(cmd *cobra.Command, args []string) {
 // setupRootCommand sets default usage, help, and error handling for
 // the root command.
 //
-// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
+// Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go
 func setupRootCommand(rootCmd *cobra.Command) {
 	ci := fs.GetConfig(context.Background())
 	// Add global flags
@@ -329,12 +329,29 @@ func showBackend(name string) {
 		if opt.IsPassword {
 			fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
 		}
+		fmt.Printf("Properties:\n\n")
 		fmt.Printf("- Config: %s\n", opt.Name)
 		fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
+		if opt.Provider != "" {
+			fmt.Printf("- Provider: %s\n", opt.Provider)
+		}
 		fmt.Printf("- Type: %s\n", opt.Type())
-		fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
+		defaultValue := opt.GetValue()
+		// Default value and Required are related: Required means option must
+		// have a value, but if there is a default then a value does not have
+		// to be explicitly set and then Required makes no difference.
+		if defaultValue != "" {
+			fmt.Printf("- Default: %s\n", quoteString(defaultValue))
+		} else {
+			fmt.Printf("- Required: %v\n", opt.Required)
+		}
+		// List examples / possible choices
 		if len(opt.Examples) > 0 {
-			fmt.Printf("- Examples:\n")
+			if opt.Exclusive {
+				fmt.Printf("- Choices:\n")
+			} else {
+				fmt.Printf("- Examples:\n")
+			}
 			for _, ex := range opt.Examples {
 				fmt.Printf("    - %s\n", quoteString(ex.Value))
 				for _, line := range strings.Split(ex.Help, "\n") {

@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error

 	f := VFS.Fs()
 	fs.Debugf(f, "Mounting on %q", mountpoint)
-	c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
+	c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -25,11 +25,10 @@ func init() {
 // mountOptions configures the options from the command line flags
 //
 // man mount.fuse for more info and note the -o flag for other options
-func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
-	device := f.Name() + ":" + f.Root()
+func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
 	mountOpts = &fuse.MountOptions{
 		AllowOther: fsys.opt.AllowOther,
-		FsName: device,
+		FsName: opt.DeviceName,
 		Name: "rclone",
 		DisableXAttrs: true,
 		Debug: fsys.opt.DebugFUSE,
@@ -120,7 +119,7 @@ func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
 	if runtime.GOOS == "darwin" {
 		opts = append(opts,
 			// VolumeName sets the volume name shown in Finder.
-			fmt.Sprintf("volname=%s", device),
+			fmt.Sprintf("volname=%s", opt.VolumeName),

 			// NoAppleXattr makes OSXFUSE disallow extended attributes with the
 			// prefix "com.apple.". This disables persistent Finder state and
@@ -167,7 +166,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	//mOpts.Debug = mountlib.DebugFUSE

 	//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
-	mountOpts := mountOptions(fsys, f)
+	mountOpts := mountOptions(fsys, f, opt)

 	// FIXME fill out
 	opts := fusefs.Options{

@@ -65,10 +65,10 @@ at all, then 1 PiB is set as both the total and the free size.

 To run rclone @ on Windows, you will need to
 download and install [WinFsp](http://www.secfs.net/winfsp/).

-[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
+[WinFsp](https://github.com/winfsp/winfsp) is an open-source
 Windows File System Proxy which makes it easy to write user space file
 systems for Windows. It provides a FUSE emulation layer which rclone
-uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
+uses in combination with [cgofuse](https://github.com/winfsp/cgofuse).
 Both of these packages are by Bill Zissimopoulos who was very helpful
 during the implementation of rclone @ for Windows.

@@ -218,7 +218,7 @@ from Microsoft's Sysinternals suite, which has option |-s| to start
 processes as the SYSTEM account. Another alternative is to run the mount
 command from a Windows Scheduled Task, or a Windows Service, configured
 to run as the SYSTEM account. A third alternative is to use the
-[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
+[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
 Note that when running rclone as another user, it will not use
 the configuration file from your profile unless you tell it to
 with the [|--config|](https://rclone.org/docs/#config-config-file) option.

@@ -40,6 +40,7 @@ type Options struct {
 	ExtraOptions []string
 	ExtraFlags []string
 	AttrTimeout time.Duration // how long the kernel caches attribute for
+	DeviceName string
 	VolumeName string
 	NoAppleDouble bool
 	NoAppleXattr bool
@@ -77,6 +78,17 @@ type MountPoint struct {
 	ErrChan <-chan error
 }

+// NewMountPoint makes a new mounting structure
+func NewMountPoint(mount MountFn, mountPoint string, f fs.Fs, mountOpt *Options, vfsOpt *vfscommon.Options) *MountPoint {
+	return &MountPoint{
+		MountFn:    mount,
+		MountPoint: mountPoint,
+		Fs:         f,
+		MountOpt:   *mountOpt,
+		VFSOpt:     *vfsOpt,
+	}
+}
+
 // Global constants
 const (
 	MaxLeafSize = 1024 // don't pass file names longer than this
@@ -125,6 +137,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
 	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
 	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
+	flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
 	// Windows and OSX
 	flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
 	// OSX only
@@ -165,14 +178,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 			defer cmd.StartStats()()
 		}

-		mnt := &MountPoint{
-			MountFn:    mount,
-			MountPoint: args[1],
-			Fs:         cmd.NewFsDir(args),
-			MountOpt:   Opt,
-			VFSOpt:     vfsflags.Opt,
-		}
-
+		mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfsflags.Opt)
 		daemon, err := mnt.Mount()

 		// Wait for foreground mount, if any...
@@ -235,6 +241,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
 		return nil, err
 	}
 	m.SetVolumeName(m.MountOpt.VolumeName)
+	m.SetDeviceName(m.MountOpt.DeviceName)

 	// Start background task if --daemon is specified
 	if m.MountOpt.Daemon {
@@ -250,6 +257,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
 	}
+	m.MountedOn = time.Now()
 	return nil, nil
 }

@@ -10,7 +10,6 @@ import (

 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfsflags"
 )

@@ -117,23 +116,15 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 		return nil, err
 	}

-	VFS := vfs.New(fdst, &vfsOpt)
-	_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
+	mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
+	_, err = mnt.Mount()
 	if err != nil {
 		log.Printf("mount FAILED: %v", err)
 		return nil, err
 	}

 	// Add mount to list if mount point was successfully created
-	liveMounts[mountPoint] = &MountPoint{
-		MountPoint: mountPoint,
-		MountedOn:  time.Now(),
-		MountFn:    mountFn,
-		UnmountFn:  unmountFn,
-		MountOpt:   mountOpt,
-		VFSOpt:     vfsOpt,
-		Fs:         fdst,
-	}
+	liveMounts[mountPoint] = mnt

 	fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
 	return nil, nil

@@ -16,11 +16,16 @@ import (
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs/config/configfile"
 	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/fstest/testy"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

 func TestRc(t *testing.T) {
+	// Disable tests under macOS and the CI since they are locking up
+	if runtime.GOOS == "darwin" {
+		testy.SkipUnreliable(t)
+	}
 	ctx := context.Background()
 	configfile.Install()
 	mount := rc.Calls.Get("mount/mount")
@@ -30,19 +35,14 @@ func TestRc(t *testing.T) {
 	getMountTypes := rc.Calls.Get("mount/types")
 	assert.NotNil(t, getMountTypes)

-	localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
-	require.NoError(t, err)
-	defer func() { _ = os.RemoveAll(localDir) }()
-	err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
+	localDir := t.TempDir()
+	err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
 	require.NoError(t, err)

-	mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
-	require.NoError(t, err)
+	mountPoint := t.TempDir()
 	if runtime.GOOS == "windows" {
 		// Windows requires the mount point not to exist
 		require.NoError(t, os.RemoveAll(mountPoint))
-	} else {
-		defer func() { _ = os.RemoveAll(mountPoint) }()
 	}

 	out, err := getMountTypes.Fn(ctx, nil)

@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
 // SetVolumeName with sensible default
 func (m *MountPoint) SetVolumeName(vol string) {
 	if vol == "" {
-		vol = m.Fs.Name() + ":" + m.Fs.Root()
+		vol = fs.ConfigString(m.Fs)
 	}
 	m.MountOpt.SetVolumeName(vol)
 }
@@ -102,3 +102,11 @@ func (o *Options) SetVolumeName(vol string) {
 	}
 	o.VolumeName = vol
 }
+
+// SetDeviceName with sensible default
+func (m *MountPoint) SetDeviceName(dev string) {
+	if dev == "" {
+		dev = fs.ConfigString(m.Fs)
+	}
+	m.MountOpt.DeviceName = dev
+}
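A small self-contained sketch of the fallback the new --devname flag gets here; the helper and values are invented for illustration, with configString standing in for fs.ConfigString:

    package main

    import "fmt"

    // configString stands in for fs.ConfigString(f), which renders a
    // remote as "name:root".
    func configString(name, root string) string { return name + ":" + root }

    // setDeviceName mirrors SetDeviceName above: an empty --devname
    // means "use the remote string".
    func setDeviceName(dev, name, root string) string {
        if dev == "" {
            return configString(name, root)
        }
        return dev
    }

    func main() {
        fmt.Println(setDeviceName("", "gdrive", "backup"))        // gdrive:backup (default)
        fmt.Println(setDeviceName("mydrive", "gdrive", "backup")) // mydrive (explicit --devname wins)
    }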
@@ -42,15 +42,31 @@ builds an in memory representation. rclone ncdu can be used during
 this scanning phase and you will see it building up the directory
 structure as it goes along.

-Here are the keys - press '?' to toggle the help on and off
+You can interact with the user interface using key presses,
+press '?' to toggle the help on and off. The supported keys are:

     ` + strings.Join(helpText()[1:], "\n    ") + `

+Listed files/directories may be prefixed by a one-character flag,
+some of them combined with a description in brackets at end of line.
+These flags have the following meaning:
+
+    e means this is an empty directory, i.e. contains no files (but
+      may contain empty subdirectories)
+    ~ means this is a directory where some of the files (possibly in
+      subdirectories) have unknown size, and therefore the directory
+      size may be underestimated (and average size inaccurate, as it
+      is average of the files with known sizes).
+    . means an error occurred while reading a subdirectory, and
+      therefore the directory size may be underestimated (and average
+      size inaccurate)
+    ! means an error occurred while reading this directory
+
 This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
 rclone remotes. It is missing lots of features at the moment
 but is useful as it stands.

-Note that it might take some time to delete big files/folders. The
+Note that it might take some time to delete big files/directories. The
 UI won't respond in the meantime since the deletion is done synchronously.
 `,
 	Run: func(command *cobra.Command, args []string) {
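For a feel of what those flags look like on screen, here is one row assembled with the format string from the Draw hunk further down ("%c %s %s%c%s%s"); all values are invented:

    package main

    import "fmt"

    func main() {
        fileFlag := '~' // some files below have unknown size
        sizeField := "   1.234Gi"
        extras := ""
        mark := '/' // directory marker
        name := "folder1"
        message := " [3 of 120 files have unknown size, size may be underestimated]"
        fmt.Printf("%c %s %s%c%s%s\n", fileFlag, sizeField, extras, mark, name, message)
    }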
@@ -283,9 +299,9 @@ func (u *UI) biggestEntry() (biggest int64) {
 		return
 	}
 	for i := range u.entries {
-		size, _, _, _, _, _ := u.d.AttrI(u.sortPerm[i])
-		if size > biggest {
-			biggest = size
+		attrs, _ := u.d.AttrI(u.sortPerm[i])
+		if attrs.Size > biggest {
+			biggest = attrs.Size
 		}
 	}
 	return
@@ -297,8 +313,8 @@ func (u *UI) hasEmptyDir() bool {
 		return false
 	}
 	for i := range u.entries {
-		_, count, isDir, _, _, _ := u.d.AttrI(u.sortPerm[i])
-		if isDir && count == 0 {
+		attrs, _ := u.d.AttrI(u.sortPerm[i])
+		if attrs.IsDir && attrs.Count == 0 {
 			return true
 		}
 	}
@@ -343,9 +359,9 @@ func (u *UI) Draw() error {
 			if y >= h-1 {
 				break
 			}
-			size, count, isDir, readable, entriesHaveErrors, err := u.d.AttrI(u.sortPerm[n])
+			attrs, err := u.d.AttrI(u.sortPerm[n])
 			fg := termbox.ColorWhite
-			if entriesHaveErrors {
+			if attrs.EntriesHaveErrors {
 				fg = termbox.ColorYellow
 			}
 			if err != nil {
@@ -356,15 +372,19 @@ func (u *UI) Draw() error {
 				fg, bg = bg, fg
 			}
 			mark := ' '
-			if isDir {
+			if attrs.IsDir {
 				mark = '/'
 			}
 			fileFlag := ' '
 			message := ""
-			if !readable {
+			if !attrs.Readable {
 				message = " [not read yet]"
 			}
-			if entriesHaveErrors {
+			if attrs.CountUnknownSize > 0 {
+				message = fmt.Sprintf(" [%d of %d files have unknown size, size may be underestimated]", attrs.CountUnknownSize, attrs.Count)
+				fileFlag = '~'
+			}
+			if attrs.EntriesHaveErrors {
 				message = " [some subdirectories could not be read, size may be underestimated]"
 				fileFlag = '.'
 			}
@@ -374,32 +394,29 @@ func (u *UI) Draw() error {
 			}
 			extras := ""
 			if u.showCounts {
-				ss := operations.CountStringField(count, u.humanReadable, 9) + " "
-				if count > 0 {
+				ss := operations.CountStringField(attrs.Count, u.humanReadable, 9) + " "
+				if attrs.Count > 0 {
 					extras += ss
 				} else {
 					extras += strings.Repeat(" ", len(ss))
 				}
 			}
-			var averageSize float64
-			if count > 0 {
-				averageSize = float64(size) / float64(count)
-			}
 			if u.showDirAverageSize {
-				ss := operations.SizeStringField(int64(averageSize), u.humanReadable, 9) + " "
-				if averageSize > 0 {
+				avg := attrs.AverageSize()
+				ss := operations.SizeStringField(int64(avg), u.humanReadable, 9) + " "
+				if avg > 0 {
 					extras += ss
 				} else {
 					extras += strings.Repeat(" ", len(ss))
 				}
 			}
 			if showEmptyDir {
-				if isDir && count == 0 && fileFlag == ' ' {
+				if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
 					fileFlag = 'e'
 				}
 			}
 			if u.showGraph {
-				bars := (size + perBar/2 - 1) / perBar
+				bars := (attrs.Size + perBar/2 - 1) / perBar
 				// clip if necessary - only happens during startup
 				if bars > 10 {
 					bars = 10
@@ -408,7 +425,7 @@ func (u *UI) Draw() error {
 			}
 			extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
 		}
-		Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
+		Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
 		y++
 	}
 }
@@ -559,14 +576,14 @@ type ncduSort struct {
 // Less is part of sort.Interface.
 func (ds *ncduSort) Less(i, j int) bool {
 	var iAvgSize, jAvgSize float64
-	isize, icount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[i])
-	jsize, jcount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[j])
+	iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
+	jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
 	iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
-	if icount > 0 {
-		iAvgSize = float64(isize / icount)
+	if iattrs.Count > 0 {
+		iAvgSize = iattrs.AverageSize()
 	}
-	if jcount > 0 {
-		jAvgSize = float64(jsize / jcount)
+	if jattrs.Count > 0 {
+		jAvgSize = jattrs.AverageSize()
 	}

 	switch {
@@ -575,33 +592,33 @@ func (ds *ncduSort) Less(i, j int) bool {
 	case ds.u.sortByName > 0:
 		break
 	case ds.u.sortBySize < 0:
-		if isize != jsize {
-			return isize < jsize
+		if iattrs.Size != jattrs.Size {
+			return iattrs.Size < jattrs.Size
 		}
 	case ds.u.sortBySize > 0:
-		if isize != jsize {
-			return isize > jsize
+		if iattrs.Size != jattrs.Size {
+			return iattrs.Size > jattrs.Size
 		}
 	case ds.u.sortByCount < 0:
-		if icount != jcount {
-			return icount < jcount
+		if iattrs.Count != jattrs.Count {
+			return iattrs.Count < jattrs.Count
 		}
 	case ds.u.sortByCount > 0:
-		if icount != jcount {
-			return icount > jcount
+		if iattrs.Count != jattrs.Count {
+			return iattrs.Count > jattrs.Count
 		}
 	case ds.u.sortByAverageSize < 0:
 		if iAvgSize != jAvgSize {
 			return iAvgSize < jAvgSize
 		}
 		// if avgSize is equal, sort by size
-		return isize < jsize
+		return iattrs.Size < jattrs.Size
 	case ds.u.sortByAverageSize > 0:
 		if iAvgSize != jAvgSize {
 			return iAvgSize > jAvgSize
 		}
 		// if avgSize is equal, sort by size
-		return isize > jsize
+		return iattrs.Size > jattrs.Size
 	}
 	// if everything equal, sort by name
 	return iname < jname

@@ -16,14 +16,42 @@ type Dir struct {
 	parent *Dir
 	path   string
 	mu     sync.Mutex
-	count  int64
 	size   int64
+	count             int64
+	countUnknownSize  int64
 	entries           fs.DirEntries
 	dirs              map[string]*Dir
 	readError         error
 	entriesHaveErrors bool
 }

+// Attrs contains accumulated properties for a directory entry
+//
+// Files with unknown size are counted separately but also included
+// in the total count. They are not included in the size, i.e. treated
+// as empty files, which means the size may be underestimated.
+type Attrs struct {
+	Size              int64
+	Count             int64
+	CountUnknownSize  int64
+	IsDir             bool
+	Readable          bool
+	EntriesHaveErrors bool
+}
+
+// AverageSize calculates average size of files in directory
+//
+// If there are files with unknown size, this returns the average over
+// files with known sizes, which means it may be under- or
+// overestimated.
+func (a *Attrs) AverageSize() float64 {
+	countKnownSize := a.Count - a.CountUnknownSize
+	if countKnownSize > 0 {
+		return float64(a.Size) / float64(countKnownSize)
+	}
+	return 0
+}
+
 // Parent returns the directory above this one
 func (d *Dir) Parent() *Dir {
 	// no locking needed since these are write once in newDir()
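A worked example of the AverageSize convention above, as a standalone function with the same arithmetic (numbers invented):

    package main

    import "fmt"

    // averageSize mirrors Attrs.AverageSize: the average is taken over
    // files with known sizes only.
    func averageSize(size, count, countUnknownSize int64) float64 {
        if known := count - countUnknownSize; known > 0 {
            return float64(size) / float64(known)
        }
        return 0
    }

    func main() {
        // 10 files, 4 with unknown size; 600 bytes accumulated in total.
        // The average is over the 6 known-size files: 100 bytes.
        fmt.Println(averageSize(600, 10, 4)) // 100
    }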
@@ -49,7 +77,13 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
 	for _, entry := range entries {
 		if o, ok := entry.(fs.Object); ok {
 			d.count++
-			d.size += o.Size()
+			size := o.Size()
+			if size < 0 {
+				// Some backends may return -1 because size of object is not known
+				d.countUnknownSize++
+			} else {
+				d.size += size
+			}
 		}
 	}
 	// Set my directory entry in parent
@@ -62,8 +96,9 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
 	// Accumulate counts in parents
 	for ; parent != nil; parent = parent.parent {
 		parent.mu.Lock()
-		parent.count += d.count
 		parent.size += d.size
+		parent.count += d.count
+		parent.countUnknownSize += d.countUnknownSize
 		if d.readError != nil {
 			parent.entriesHaveErrors = true
 		}
@@ -91,17 +126,24 @@ func (d *Dir) Remove(i int) {
 // Call with d.mu held
 func (d *Dir) remove(i int) {
 	size := d.entries[i].Size()
+	countUnknownSize := int64(0)
+	if size < 0 {
+		size = 0
+		countUnknownSize = 1
+	}
 	count := int64(1)

 	subDir, ok := d.getDir(i)
 	if ok {
 		size = subDir.size
 		count = subDir.count
+		countUnknownSize = subDir.countUnknownSize
 		delete(d.dirs, path.Base(subDir.path))
 	}

 	d.size -= size
 	d.count -= count
+	d.countUnknownSize -= countUnknownSize
 	d.entries = append(d.entries[:i], d.entries[i+1:]...)

 	dir := d
@@ -111,6 +153,7 @@ func (d *Dir) remove(i int) {
 		parent.dirs[path.Base(dir.path)] = dir
 		parent.size -= size
 		parent.count -= count
+		parent.countUnknownSize -= countUnknownSize
 		dir = parent
 		parent.mu.Unlock()
 	}
@@ -151,19 +194,19 @@ func (d *Dir) Attr() (size int64, count int64) {
 }

 // AttrI returns the size, count and flags for the i-th directory entry
-func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool, entriesHaveErrors bool, err error) {
+func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 	subDir, isDir := d.getDir(i)

 	if !isDir {
-		return d.entries[i].Size(), 0, false, true, d.entriesHaveErrors, d.readError
+		return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
 	}
 	if subDir == nil {
-		return 0, 0, true, false, false, nil
+		return Attrs{0, 0, 0, true, false, false}, nil
 	}
-	size, count = subDir.Attr()
-	return size, count, true, true, subDir.entriesHaveErrors, subDir.readError
+	size, count := subDir.Attr()
+	return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
 }

 // Scan the Fs passed in, returning a root directory channel and an

@@ -290,7 +290,7 @@ func list(ctx context.Context) error {
 		if !ok {
 			return errors.New("bad JSON")
 		}
-		fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.Replace(info["Path"].(string), "/", "-", -1))
+		fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.ReplaceAll(info["Path"].(string), "/", "-"))
 		fmt.Printf("%s\n\n", info["Help"])
 		if authRequired := info["AuthRequired"]; authRequired != nil {
 			if authRequired.(bool) {

Binary file not shown (image; before: 20 KiB, after: 1.4 KiB)
Binary file not shown (image; before: 5.6 KiB, after: 724 B)
@@ -23,6 +23,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/file"

 	"github.com/stretchr/testify/assert"
@@ -303,6 +304,10 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
 }

 func testMountAPI(t *testing.T, sockAddr string) {
+	// Disable tests under macOS and linux in the CI since they are locking up
+	if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
+		testy.SkipUnreliable(t)
+	}
 	if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
 		t.Skip("Test requires working mount command")
 	}

@@ -274,7 +274,6 @@ func (vol *Volume) mount(id string) error {
 	if _, err := vol.mnt.Mount(); err != nil {
 		return err
 	}
-	vol.mnt.MountedOn = time.Now()
 	vol.mountReqs[id] = nil
 	vol.drv.monChan <- false // ask monitor to refresh channels
 	return nil

@@ -16,7 +16,10 @@ import (
 )

 // Help describes the options for the serve package
-var Help = `--template allows a user to specify a custom markup template for http
+var Help = `
+#### Template
+
+--template allows a user to specify a custom markup template for http
 and webdav serve functions. The server exports the following markup
 to be used within the template to serve pages:

@@ -23,7 +23,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
 	flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
 	flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
 	flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
-	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
+	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
 	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
 	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
 	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")

@@ -16,6 +16,7 @@ TestFichier:
 TestFTP:
 TestGoogleCloudStorage:
 TestHubic:
+TestNetStorage:
 TestOneDrive:
 TestPcloud:
 TestQingStor:

@@ -7,9 +7,7 @@ import (
 	"crypto/rand"
 	"encoding/hex"
 	"io"
-	"io/ioutil"
 	"net/http"
-	"os"
 	"strings"
 	"testing"

@@ -113,14 +111,7 @@ func TestResticHandler(t *testing.T) {
 	}

 	// setup rclone with a local backend in a temporary directory
-	tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
-	require.NoError(t, err)
-
-	// make sure the tempdir is properly removed
-	defer func() {
-		err := os.RemoveAll(tempdir)
-		require.NoError(t, err)
-	}()
+	tempdir := t.TempDir()

 	// globally set append-only mode
 	prev := appendOnly
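This hunk (and the one that follows) swap hand-rolled temp-dir plumbing for the standard library helper. A minimal illustration of the pattern — the package name is invented for the sketch:

    package example

    import (
        "io/ioutil"
        "path/filepath"
        "testing"
    )

    // t.TempDir hands back a fresh directory and registers its removal
    // with the test framework, so the manual os.RemoveAll defer (and
    // its error handling) can be deleted.
    func TestTempDirPattern(t *testing.T) {
        tempdir := t.TempDir()
        err := ioutil.WriteFile(filepath.Join(tempdir, "file.txt"), []byte("hello"), 0666)
        if err != nil {
            t.Fatal(err)
        }
    }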
@@ -7,9 +7,7 @@ import (
 	"context"
 	"crypto/rand"
 	"io"
-	"io/ioutil"
 	"net/http"
-	"os"
 	"strings"
 	"testing"

@@ -35,14 +33,7 @@ func TestResticPrivateRepositories(t *testing.T) {
 	require.NoError(t, err)

 	// setup rclone with a local backend in a temporary directory
-	tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
-	require.NoError(t, err)
-
-	// make sure the tempdir is properly removed
-	defer func() {
-		err := os.RemoveAll(tempdir)
-		require.NoError(t, err)
-	}()
+	tempdir := t.TempDir()

 	// globally set private-repos mode & test user
 	prev := privateRepos

@@ -43,7 +43,7 @@ var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`)

 // Unescape a string that was escaped by rclone
 func shellUnEscape(str string) string {
-	str = strings.Replace(str, "'\n'", "\n", -1)
+	str = strings.ReplaceAll(str, "'\n'", "\n")
 	str = shellUnEscapeRegex.ReplaceAllString(str, `$1`)
 	return str
 }

@@ -24,26 +24,51 @@ func init() {
 var commandDefinition = &cobra.Command{
 	Use:   "size remote:path",
 	Short: `Prints the total size and number of objects in remote:path.`,
+	Long: `
+Counts objects in the path and calculates the total size. Prints the
+result to standard output.
+
+By default the output is in human-readable format, but shows values in
+both human-readable format as well as the raw numbers (global option
+` + "`--human-readable`" + ` is not considered). Use option ` + "`--json`" + `
+to format output as JSON instead.
+
+Recurses by default, use ` + "`--max-depth 1`" + ` to stop the
+recursion.
+
+Some backends do not always provide file sizes, see for example
+[Google Photos](/googlephotos/#size) and
+[Google Drive](/drive/#limitations-of-google-docs).
+Rclone will then show a notice in the log indicating how many such
+files were encountered, and count them in as empty files in the output
+of the size command.
+`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
 			var err error
 			var results struct {
-				Count int64 `json:"count"`
-				Bytes int64 `json:"bytes"`
+				Count    int64 `json:"count"`
+				Bytes    int64 `json:"bytes"`
+				Sizeless int64 `json:"sizeless"`
 			}

-			results.Count, results.Bytes, err = operations.Count(context.Background(), fsrc)
+			results.Count, results.Bytes, results.Sizeless, err = operations.Count(context.Background(), fsrc)
 			if err != nil {
 				return err
 			}

+			if results.Sizeless > 0 {
+				fs.Logf(fsrc, "Size may be underestimated due to %d objects with unknown size", results.Sizeless)
+			}
 			if jsonOutput {
 				return json.NewEncoder(os.Stdout).Encode(results)
 			}
 			fmt.Printf("Total objects: %s (%d)\n", fs.CountSuffix(results.Count), results.Count)
 			fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
+			if results.Sizeless > 0 {
+				fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
+			}
 			return nil
 		})
 	},
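To show the shape of the new output path, here is a sketch that reuses the format strings from the hunk above; the values are invented and the fs.CountSuffix/fs.SizeSuffix renderings are stubbed with plain strings:

    package main

    import "fmt"

    func main() {
        count, bytes, sizeless := int64(1500), int64(1073741824), int64(3)
        fmt.Printf("Total objects: %s (%d)\n", "1.500k", count)
        fmt.Printf("Total size: %s (%d Byte)\n", "1 GiB", bytes)
        if sizeless > 0 { // the new third line only appears when some objects had no size
            fmt.Printf("Total objects with unknown size: %s (%d)\n", "3", sizeless)
        }
    }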
@@ -8,6 +8,7 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
 	TestDrive:testInfo \
 	TestDropbox:testInfo \
 	TestGoogleCloudStorage:rclone-testinfo \
+	TestNetStorage:testInfo \
 	TestOneDrive:testInfo \
 	TestS3:rclone-testinfo \
 	TestSftp:testInfo \

Some files were not shown because too many files have changed in this diff.