Mirror of https://github.com/rclone/rclone.git (synced 2025-12-21 10:43:37 +00:00)

Compare commits: fix-max-me ... build (303 commits)
The compare view listed the 303 commits in this range by abbreviated SHA only (from 23e9066b4f down to 0b9671313b); the author, message and date columns were not captured in this mirror.
.github/workflows/build.yml (vendored): 60 changed lines
```diff
@@ -23,15 +23,18 @@ jobs:
   build:
     if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
+    defaults:
+      run:
+        shell: bash
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -42,14 +45,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,14 +61,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -75,14 +78,14 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.23
+          - job_name: go1.24
             os: ubuntu-latest
-            go: '1.23'
+            go: '1.24'
             quicktest: true
             racequicktest: true

@@ -92,7 +95,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -103,7 +106,6 @@ jobs:
           check-latest: true

       - name: Set environment variables
-        shell: bash
        run: |
           echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
           echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -112,7 +114,6 @@ jobs:
           if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

       - name: Install Libraries on Linux
-        shell: bash
         run: |
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
@@ -122,7 +123,6 @@ jobs:
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
-        shell: bash
         run: |
           # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
           # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
@@ -151,7 +151,6 @@ jobs:
         if: matrix.os == 'windows-latest'

       - name: Print Go version and environment
-        shell: bash
         run: |
           printf "Using go at: $(which go)\n"
           printf "Go version: $(go version)\n"
@@ -163,29 +162,24 @@ jobs:
           env

       - name: Build rclone
-        shell: bash
         run: |
           make

       - name: Rclone version
-        shell: bash
         run: |
           rclone version

       - name: Run tests
-        shell: bash
         run: |
           make quicktest
         if: matrix.quicktest

       - name: Race test
-        shell: bash
         run: |
           make racequicktest
         if: matrix.racequicktest

       - name: Run librclone tests
-        shell: bash
         run: |
           make -C librclone/ctest test
           make -C librclone/ctest clean
@@ -193,14 +187,12 @@ jobs:
         if: matrix.librclonetest

       - name: Compile all architectures test
-        shell: bash
         run: |
           make
           make compile_all
         if: matrix.compile_all

       - name: Deploy built binaries
-        shell: bash
         run: |
           if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
           make ci_beta
@@ -219,13 +211,12 @@ jobs:
     steps:
       - name: Get runner parameters
         id: get-runner-parameters
-        shell: bash
         run: |
           echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
           echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -233,7 +224,7 @@ jobs:
         id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '1.24'
           check-latest: true
           cache: false

@@ -291,8 +282,18 @@ jobs:
       - name: Scan for vulnerabilities
         run: govulncheck ./...

+      - name: Check Markdown format
+        uses: DavidAnson/markdownlint-cli2-action@v20
+        with:
+          globs: |
+            CONTRIBUTING.md
+            MAINTAINERS.md
+            README.md
+            RELEASE.md
+            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+
       - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py
+        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
         if: github.event_name == 'pull_request'

   android:
@@ -303,7 +304,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -311,10 +312,9 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: '>=1.25.0-rc.1'

       - name: Set global environment variables
-        shell: bash
         run: |
           echo "VERSION=$(make version)" >> $GITHUB_ENV

@@ -333,7 +333,6 @@ jobs:
         run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

       - name: arm-v7a Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -347,7 +346,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

       - name: arm64-v8a Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -360,7 +358,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

       - name: x86 Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -373,7 +370,6 @@ jobs:
         run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

       - name: x64 Set environment variables
-        shell: bash
         run: |
           echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
```

The following hunks belong to other workflow files whose file headers were not captured in this mirror:

```diff
@@ -52,7 +52,7 @@ jobs:
           df -h .

       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -198,7 +198,7 @@ jobs:
     steps:
       - name: Download Image Digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           path: /tmp/digests
           pattern: digests-*

@@ -30,7 +30,7 @@ jobs:
           sudo rm -rf /usr/share/dotnet || true
           df -h .
       - name: Checkout master
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
```
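One behavioural change above is that `check_autogenerated_edits.py` now diffs against the pull request's base branch instead of an implicit default. A rough local equivalent, as a sketch: `origin/master` stands in for the CI's `${{ github.base_ref }}` and is an assumption, not part of the diff.

```sh
# Hypothetical local run of the autogenerated-files check, mirroring the
# CI invocation above; substitute your PR's actual base ref.
git fetch origin master
bin/check_autogenerated_edits.py origin/master
```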
.markdownlint.yml (new file): 43 lines added
```diff
@@ -0,0 +1,43 @@
+default: true
+
+# Use specific styles, to be consistent accross all documents.
+# Default is to accept any as long as it is consistent within the same document.
+heading-style: # MD003
+  style: atx
+ul-style: # MD004
+  style: dash
+hr-style: # MD035
+  style: ---
+code-block-style: # MD046
+  style: fenced
+code-fence-style: # MD048
+  style: backtick
+emphasis-style: # MD049
+  style: asterisk
+strong-style: # MD050
+  style: asterisk
+
+# Allow multiple headers with same text as long as they are not siblings.
+no-duplicate-heading: # MD024
+  siblings_only: true
+
+# Allow long lines in code blocks and tables.
+line-length: # MD013
+  code_blocks: false
+  tables: false
+
+# The Markdown files used to generated docs with Hugo contain a top level
+# header, even though the YAML front matter has a title property (which is
+# used for the HTML document title only). Suppress Markdownlint warning:
+# Multiple top-level headings in the same document.
+single-title: # MD025
+  level: 1
+  front_matter_title:
+
+# The HTML docs generated by Hugo from Markdown files may have slightly
+# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
+# leading dashes so "--config string" becomes "#config-string" while it is
+# "#--config-string" in GitHub preview. When writing links to headers in the
+# Markdown files we must use whatever works in the final HTML generated docs.
+# Suppress Markdownlint warning: Link fragments should be valid.
+link-fragments: false # MD051
```
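The CI step added in the workflow diff runs this configuration through markdownlint-cli2. A minimal sketch of the equivalent local check, assuming Node.js is available; the tool discovers `.markdownlint.yml` in the repository on its own, and the file list mirrors the globs in the workflow:

```sh
# Sketch of a local Markdown lint run matching the new CI check.
npm install -g markdownlint-cli2
markdownlint-cli2 CONTRIBUTING.md MAINTAINERS.md README.md RELEASE.md
```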
CONTRIBUTING.md: 352 changed lines
````diff
@@ -15,61 +15,81 @@ with the [latest beta of rclone](https://beta.rclone.org/):
 - Rclone version (e.g. output from `rclone version`)
 - Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
 - The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
-- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
-  - if the log contains secrets then edit the file with a text editor first to obscure them
+- A log of the command with the `-vv` flag (e.g. output from
+  `rclone -vv copy /tmp remote:tmp`)
+  - if the log contains secrets then edit the file with a text editor first to
+    obscure them

 ## Submitting a new feature or bug fix

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.

-If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
+If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
+first so it can be discussed.

 To prepare your pull request first press the fork button on [rclone's GitHub
 page](https://github.com/rclone/rclone).

-Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
+Then [install Git](https://git-scm.com/downloads) and set your public contribution
+[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
+and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

-Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
+Next open your terminal, change directory to your preferred folder and initialise
+your local rclone project:

-    git clone https://github.com/rclone/rclone.git
-    cd rclone
-    git remote rename origin upstream
-    # if you have SSH keys setup in your GitHub account:
-    git remote add origin git@github.com:YOURUSER/rclone.git
-    # otherwise:
-    git remote add origin https://github.com/YOURUSER/rclone.git
+```sh
+git clone https://github.com/rclone/rclone.git
+cd rclone
+git remote rename origin upstream
+# if you have SSH keys setup in your GitHub account:
+git remote add origin git@github.com:YOURUSER/rclone.git
+# otherwise:
+git remote add origin https://github.com/YOURUSER/rclone.git
+```

-Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
+Note that most of the terminal commands in the rest of this guide must be
+executed from the rclone folder created above.

 Now [install Go](https://golang.org/doc/install) and verify your installation:

-    go version
+```sh
+go version
+```

 Great, you can now compile and execute your own version of rclone:

-    go build
-    ./rclone version
+```sh
+go build
+./rclone version
+```

 (Note that you can also replace `go build` with `make`, which will include a
 more accurate version number in the executable as well as enable you to specify
 more build options.) Finally make a branch to add your new feature

-    git checkout -b my-new-feature
+```sh
+git checkout -b my-new-feature
+```

 And get hacking.

-You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
+You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
+and a quick view on the rclone [code organisation](#code-organisation).

-When ready - test the affected functionality and run the unit tests for the code you changed
+When ready - test the affected functionality and run the unit tests for the
+code you changed

-    cd folder/with/changed/files
-    go test -v
+```sh
+cd folder/with/changed/files
+go test -v
+```

 Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.

-This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
+This is typically enough if you made a simple bug fix, otherwise please read
+the rclone [testing](#testing) section too.

 Make sure you

@@ -79,14 +99,19 @@ Make sure you

 When you are done with that push your changes to GitHub:

-    git push -u origin my-new-feature
+```sh
+git push -u origin my-new-feature
+```

 and open the GitHub website to [create your pull
 request](https://help.github.com/articles/creating-a-pull-request/).

-Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
+Your changes will then get reviewed and you might get asked to fix some stuff.
+If so, then make the changes in the same branch, commit and push your updates to
+GitHub.

-You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
+You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
+or [squash your commits](#squashing-your-commits).

 ## Using Git and GitHub

@@ -94,87 +119,118 @@ You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)

 Follow the guideline for [commit messages](#commit-messages) and then:

-    git checkout my-new-feature      # To switch to your branch
-    git status                       # To see the new and changed files
-    git add FILENAME                 # To select FILENAME for the commit
-    git status                       # To verify the changes to be committed
-    git commit                       # To do the commit
-    git log                          # To verify the commit. Use q to quit the log
+```sh
+git checkout my-new-feature      # To switch to your branch
+git status                       # To see the new and changed files
+git add FILENAME                 # To select FILENAME for the commit
+git status                       # To verify the changes to be committed
+git commit                       # To do the commit
+git log                          # To verify the commit. Use q to quit the log
+```

 You can modify the message or changes in the latest commit using:

-    git commit --amend
+```sh
+git commit --amend
+```

-If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you amend to commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

 ### Replacing your previously pushed commits

-Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
+Note that you are about to rewrite the GitHub history of your branch. It is good
+practice to involve your collaborators before modifying commits that have been
+pushed to GitHub.

 Your previously pushed commits are replaced by:

-    git push --force origin my-new-feature
+```sh
+git push --force origin my-new-feature
+```

 ### Basing your changes on the latest master

-To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
+To base your changes on the latest version of the
+[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

-    git checkout master
-    git fetch upstream
-    git merge --ff-only
-    git push origin --follow-tags    # optional update of your fork in GitHub
-    git checkout my-new-feature
-    git rebase master
+```sh
+git checkout master
+git fetch upstream
+git merge --ff-only
+git push origin --follow-tags    # optional update of your fork in GitHub
+git checkout my-new-feature
+git rebase master
+```

-If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you rebase commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-### Squashing your commits ###
+### Squashing your commits

 To combine your commits into one commit:

-    git log                          # To count the commits to squash, e.g. the last 2
-    git reset --soft HEAD~2          # To undo the 2 latest commits
-    git status                       # To check everything is as expected
+```sh
+git log                  # To count the commits to squash, e.g. the last 2
+git reset --soft HEAD~2  # To undo the 2 latest commits
+git status               # To check everything is as expected
+```

 If everything is fine, then make the new combined commit:

-    git commit                       # To commit the undone commits as one
+```sh
+git commit               # To commit the undone commits as one
+```

 otherwise, you may roll back using:

-    git reflog                       # To check that HEAD{1} is your previous state
-    git reset --soft 'HEAD@{1}'      # To roll back to your previous state
+```sh
+git reflog                   # To check that HEAD{1} is your previous state
+git reset --soft 'HEAD@{1}'  # To roll back to your previous state
+```

-If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you squash commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
+Tip: You may like to use `git rebase -i master` if you are experienced or have a
+more complex situation.

 ### GitHub Continuous Integration

-rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
+rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
+to build and test the project, which should be automatically available for your
+fork too from the `Actions` tab in your repository.

 ## Testing

 ### Code quality tests

-If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
+If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
+you can run the same tests as get run in the CI which can be very helpful.

 You can run them with `make check` or with `golangci-lint run ./...`.

-Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
+Using these tests ensures that the rclone codebase all uses the same coding
+standards. These tests also check for easy mistakes to make (like forgetting
+to check an error return).

 ### Quick testing

 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

-    go test -v ./...
+```sh
+go test -v ./...
+```

 You can also use `make`, if supported by your platform

-    make quicktest
+```sh
+make quicktest
+```

-The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
+The quicktest is [automatically run by GitHub](#github-continuous-integration)
+when you push your branch to GitHub.

 ### Backend testing

@@ -190,41 +246,51 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.

-    cd backend/drive
-    go test -v
+```sh
+cd backend/drive
+go test -v
+```

 You can then run the integration tests which test all of rclone's
 operations. Normally these get run against the local file system,
 but they can be run against any of the remotes.

-    cd fs/sync
-    go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -fast-list
-
-    cd fs/operations
-    go test -v -remote TestDrive:
+```sh
+cd fs/sync
+go test -v -remote TestDrive:
+go test -v -remote TestDrive: -fast-list
+
+cd fs/operations
+go test -v -remote TestDrive:
+```

 If you want to use the integration test framework to run these tests
 altogether with an HTML report and test retries then from the
 project root:

-    go install github.com/rclone/rclone/fstest/test_all
-    test_all -backends drive
+```sh
+go install github.com/rclone/rclone/fstest/test_all
+test_all -backends drive
+```

 ### Full integration testing

 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-    make check
-    make test
+```sh
+make check
+make test
+```

 The commands may require some extra go packages which you can install with

-    make build_dep
+```sh
+make build_dep
+```

 The full integration tests are run daily on the integration test server. You can
-find the results at https://pub.rclone.org/integration-tests/
+find the results at <https://pub.rclone.org/integration-tests/>

 ## Code Organisation

@@ -240,8 +306,10 @@ with modules beneath.
 - ...commands
 - cmdtest - end-to-end tests of commands, flags, environment variables,...
 - docs - the documentation and website
-  - content - adjust these docs only - everything else is autogenerated
-    - command - these are auto-generated - edit the corresponding .go file
+  - content - adjust these docs only, except those marked autogenerated
+    or portions marked autogenerated where the corresponding .go file must be
+    edited instead, and everything else is autogenerated
+    - commands - these are auto-generated, edit the corresponding .go file
 - fs - main rclone definitions - minimal amount of code
   - accounting - bandwidth limiting and statistics
   - asyncreader - an io.Reader which reads ahead

@@ -279,6 +347,36 @@ with modules beneath.

 If you are adding a new feature then please update the documentation.

+The documentation sources are generally in Markdown format, in conformance
+with the CommonMark specification and compatible with GitHub Flavored
+Markdown (GFM). The markdown format is checked as part of the lint operation
+that runs automatically on pull requests, to enforce standards and consistency.
+This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
+tool, which can also be integrated into editors so you can perform the same
+checks while writing.
+
+HTML pages, served as website <rclone.org>, are generated from the Markdown,
+using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
+there is currently used a different algorithm for generating header anchors
+than what GitHub uses for its Markdown rendering. For example, in the HTML docs
+generated by Hugo any leading `-` characters are ignored, which means when
+linking to a header with text `--config string` we therefore need to use the
+link `#config-string` in our Markdown source, which will not work in GitHub's
+preview where `#--config-string` would be the correct link.
+
+Most of the documentation are written directly in text files with extension
+`.md`, mainly within folder `docs/content`. Note that several of such files
+are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
+or contain autogenerated portions (e.g. the backend documentation under
+`docs/content/commands`). These are marked with an `autogenerated` comment.
+The sources of the autogenerated text are usually Markdown formatted text
+embedded as string values in the Go source code, so you need to locate these
+and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
+files in the root of the repository are also autogenerated. The autogeneration
+of files, and the website, will be done during the release process. See the
+`make doc` and `make website` targets in the Makefile if you are interested in
+how. You don't need to run these when adding a feature.
+
 If you add a new general flag (not for a backend), then document it in
 `docs/content/docs.md` - the flags there are supposed to be in
 alphabetical order.

@@ -307,19 +405,20 @@ the source file in the `Help:` field.
 create a new list item. Also, for enumeration texts like name of
 countries, it looks better without an ending period/full stop character.

-The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
-from those during the release process. See the `make doc` and `make
-website` targets in the Makefile if you are interested in how. You
-don't need to run these when adding a feature.
+When writing documentation for an entirely new backend,
+see [backend documentation](#backend-documentation).

-Documentation for rclone sub commands is with their code, e.g.
-`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
-line, without a period/full stop character at the end, as it will be
-combined unmodified with other information (such as any default value).
+If you are updating documentation for a command, you must do that in the
+command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
+sentence on a single line, without a period/full stop character at the end,
+as it will be combined unmodified with other information (such as any default
+value).

-Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
-for small changes in the docs which makes it very easy.
+Note that you can use
+[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
+for small changes in the docs which makes it very easy. Just remember the
+caveat when linking to header anchors, noted above, which means that GitHub's
+Markdown preview may not be an entirely reliable verification of the results.

 ## Making a release

@@ -350,13 +449,13 @@ change will get linked into the issue.

 Here is an example of a short commit message:

-```
+```text
 drive: add team drive support - fixes #885
 ```

 And here is an example of a longer one:

-```
+```text
 mount: fix hang on errored upload

 In certain circumstances, if an upload failed then the mount could hang

@@ -379,7 +478,9 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency and add it to
 `go.mod` and `go.sum`.

-    go get github.com/ncw/new_dependency
+```sh
+go get github.com/ncw/new_dependency
+```

 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.

@@ -391,7 +492,9 @@ and `go.sum` in the same commit as your other changes.

 If you need to update a dependency then run

-    go get golang.org/x/crypto
+```sh
+go get golang.org/x/crypto
+```

 Check in a single commit as above.

@@ -434,12 +537,18 @@ remote or an fs.
 ### Getting going

 - Create `backend/remote/remote.go` (copy this from a similar remote)
-  - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
+  - box is a good one to start from if you have a directory-based remote (and
+    shows how to use the directory cache)
   - b2 is a good one to start from if you have a bucket-based remote
 - Add your remote to the imports in `backend/all/all.go`
-- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
-- Try to implement as many optional methods as possible as it makes the remote more usable.
-- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+- HTTP based remotes are easiest to maintain if they use rclone's
+  [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
+  if there is a really good Go SDK from the provider then use that instead.
+- Try to implement as many optional methods as possible as it makes the remote
+  more usable.
+- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
+  make sure we can encode any path name and `rclone info` to help determine the
+  encodings needed
   - `rclone purge -v TestRemote:rclone-info`
   - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
   - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`

@@ -447,12 +556,19 @@ remote or an fs.

 ### Guidelines for a speedy merge

-- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
-- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
-- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
-- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
+- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
+  if you are implementing a REST like backend and parsing XML/JSON in the backend.
+- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
+  if your backend is HTTP based - this adds features like `--dump bodies`,
+  `--tpslimit`, `--user-agent` without you having to code anything!
+- **Do** follow your example backend exactly - use the same code order, function
+  names, layout, structure. **Don't** move stuff around and **Don't** delete the
+  comments.
+- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
+  backends like that - don't follow them!)
 - **Do** put your API type definitions in a separate file - by preference `api/types.go`
-- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
+- **Remember** we have >50 backends to maintain so keeping them as similar as
+  possible to each other is a high priority!

 ### Unit tests

@@ -463,7 +579,8 @@ remote or an fs.
 ### Integration tests

 - Add your backend to `fstest/test_all/config.yaml`
-- Once you've done that then you can use the integration test framework from the project root:
+- Once you've done that then you can use the integration test framework from
+  the project root:
   - go install ./...
   - test_all -backends remote

@@ -487,10 +604,13 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
 `Google Drive`) but with the local file system last.

 - `README.md` - main GitHub page
-- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
+- `docs/content/remote.md` - main docs page (note the backend options are
+  automatically added to this file with `make backenddocs`)
+  - make sure this has the `autogenerated options` comments in (see your
+    reference backend docs)
   - update them in your backend with `bin/make_backend_docs.py remote`
-- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
+- `docs/content/overview.md` - overview docs - add an entry into the Features
+  table and the Optional Features table.
 - `docs/content/docs.md` - list of remotes in config section
 - `docs/content/_index.md` - front page of rclone.org
 - `docs/layouts/chrome/navbar.html` - add it to the website navigation

@@ -513,6 +633,7 @@ You'll need to modify the following files
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
   - Add a section about the provider linked from there.
+  - Make sure this is in alphabetical order in the `Providers` section.
 - Add a transcript of a trial `rclone config` session
   - Edit the transcript to remove things which might change in subsequent versions
 - **Do not** alter or add to the autogenerated parts of `s3.md`

@@ -541,34 +662,51 @@ For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone

 ## Writing a plugin

-New features (backends, commands) can also be added "out-of-tree", through Go plugins.
-Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
-This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
+New features (backends, commands) can also be added "out-of-tree", through Go
+plugins. Changes will be kept in a dynamically loaded file instead of being
+compiled into the main binary. This is useful if you can't merge your changes
+upstream or don't want to maintain a fork of rclone.

 ### Usage

-- Naming
+- Naming
   - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
   - `KIND` should be one of `backend`, `command` or `bundle`.
   - Example: A plugin with backend support for PiFS would be called
     `librcloneplugin_backend_pifs.so`.
-- Loading
+- Loading
   - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
   - Supported on rclone v1.50 or greater.
   - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
   - If this variable doesn't exist, plugin support is disabled.
   - Plugins must be compiled against the exact version of rclone to work.
-    (The rclone used during building the plugin must be the same as the source of rclone)
+    (The rclone used during building the plugin must be the same as the source
+    of rclone)

 ### Building

 To turn your existing additions into a Go plugin, move them to an external repository
 and change the top-level package name to `main`.

-Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
+Check `rclone --version` and make sure that the plugin's rclone dependency and
+host Go version match.

 Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

 [Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

 [Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
+
+## Keeping a backend or command out of tree
+
+Rclone was designed to be modular so it is very easy to keep a backend
+or a command out of the main rclone source tree.
+
+So for example if you had a backend which accessed your proprietary
+systems or a command which was specialised for your needs you could
+add them out of tree.
+
+This may be easier than using a plugin and is supported on all
+platforms not just macOS and Linux.
+
+This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
+which has an example of an out of tree backend `ram` (which is a
+renamed version of the `memory` backend).
````
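The plugin naming and loading rules in the `Writing a plugin` section above combine into a short workflow. A sketch under those rules; the plugin directory path here is illustrative, and `librcloneplugin_backend_pifs.so` is the example name from the text, not a real plugin:

```sh
# Build a backend plugin and make it discoverable; rclone loads every
# plugin found under $RCLONE_PLUGIN_PATH.
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
export RCLONE_PLUGIN_PATH="$HOME/.rclone/plugins"   # illustrative path
mkdir -p "$RCLONE_PLUGIN_PATH"
cp librcloneplugin_backend_pifs.so "$RCLONE_PLUGIN_PATH/"
rclone version   # run any command; plugins are picked up from that folder
```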
MAINTAINERS.md: 118 changed lines
@@ -1,4 +1,4 @@
|
||||
# Maintainers guide for rclone #
|
||||
# Maintainers guide for rclone
|
||||
|
||||
Current active maintainers of rclone are:
|
||||
|
||||
@@ -24,80 +24,108 @@ Current active maintainers of rclone are:
|
||||
| Dan McArdle | @dmcardle | gitannex |
|
||||
| Sam Harrison | @childish-sambino | filescom |
|
||||
|
||||
**This is a work in progress Draft**
|
||||
## This is a work in progress draft
|
||||
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up
|
||||
of what I (@ncw) attempt to do.
|
||||
|
||||
## Triaging Tickets ##
|
||||
## Triaging Tickets
|
||||
|
||||
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
|
||||
When a ticket comes in it should be triaged. This means it should be classified
|
||||
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
|
||||
of back and forth to determine whether it is a valid ticket so tickets may
|
||||
remain without labels or milestone for a while.
|
||||
|
||||
Rclone uses the labels like this:
|
||||
|
||||
* `bug` - a definitely verified bug
|
||||
* `can't reproduce` - a problem which we can't reproduce
|
||||
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
|
||||
* `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
* `enhancement: new remote` - a new rclone backend
|
||||
* `enhancement` - a new feature
|
||||
* `FUSE` - to do with `rclone mount` command
|
||||
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
|
||||
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
|
||||
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
* `Remote: XXX` - which rclone backend this affects
|
||||
* `thinking` - not decided on the course of action yet
|
||||
- `bug` - a definitely verified bug
|
||||
- `can't reproduce` - a problem which we can't reproduce
|
||||
- `doc fix` - a bug in the documentation - if users need help understanding the
|
||||
docs add this label
|
||||
- `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
- `enhancement: new remote` - a new rclone backend
|
||||
- `enhancement` - a new feature
|
||||
- `FUSE` - to do with `rclone mount` command
|
||||
- `good first issue` - mark these if you find a small self-contained issue -
|
||||
these get shown to new visitors to the project
|
||||
- `help` wanted - mark these if you find a self-contained issue - these get
|
||||
shown to new visitors to the project
|
||||
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
- `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
- `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
- `Remote: XXX` - which rclone backend this affects
|
||||
- `thinking` - not decided on the course of action yet
|
||||
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with
|
||||
the appropriate other tags. Don't forget the "good first issue" tag to give new
|
||||
contributors something easy to do to get going.
|
||||
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
|
||||
When a ticket is tagged it should be added to a milestone, either the next
|
||||
release, the one after, Soon or Help Wanted. Bugs can be added to the
|
||||
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
|
||||
something (e.g. the next go release).
|
||||
|
||||
The milestones have these meanings:
|
||||
|
||||
* v1.XX - stuff we would like to fit into this release
|
||||
* v1.XX+1 - stuff we are leaving until the next release
|
||||
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
* Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
|
||||
- v1.XX - stuff we would like to fit into this release
|
||||
- v1.XX+1 - stuff we are leaving until the next release
|
||||
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
- Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
- Known bugs - bugs waiting on external factors or we aren't going to fix for
|
||||
the moment
|
||||
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
|
||||
are good candidates for ones that have slipped between the gaps and need
|
||||
following up.
|
||||
|
||||
## Closing Tickets ##
|
||||
## Closing Tickets
|
||||
|
||||
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
|
||||
Close tickets as soon as you can - make sure they are tagged with a release.
|
||||
Post a link to a beta in the ticket with the fix in, asking for feedback.
|
||||
|
||||
## Pull requests ##
|
||||
## Pull requests
|
||||
|
||||
Try to process pull requests promptly!
|
||||
|
||||
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
|
||||
Merging pull requests on GitHub itself works quite well nowadays so you can
|
||||
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
|
||||
Use the squash and rebase option if you need to edit the commit message.
|
||||
|
||||
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
|
||||
After merging the commit, in your local master branch, do `git pull` then run
|
||||
`bin/update-authors.py` to update the authors file then `git push`.
|
||||
|
||||
Sometimes pull requests need to be left open for a while - this especially true of contributions of new backends which take a long time to get right.
|
||||
Sometimes pull requests need to be left open for a while - this especially true
|
||||
of contributions of new backends which take a long time to get right.
|
||||
|
||||
## Merges ##
|
||||
## Merges
|
||||
|
||||
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
|
||||
If you are merging a branch locally then do `git merge --ff-only branch-name` to
|
||||
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
|
||||
|
||||
## Release cycle ##
|
||||
## Release cycle
|
||||
|
||||
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
|
||||
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
|
||||
if there is something big to merge that didn't stabilize properly or for personal
|
||||
reasons.
|
||||
|
||||
High impact regressions should be fixed before the next release.
|
||||
|

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.

## Mailing list

There is now an invite-only mailing list for rclone developers, `rclone-dev`,
on Google Groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.

MANUAL.html (generated): file diff suppressed because it is too large
MANUAL.txt (generated): file diff suppressed because it is too large

Makefile:

@@ -243,7 +243,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache

tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

README.md:

@@ -1,22 +1,6 @@

<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |

@@ -34,97 +18,105 @@

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers

- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service (OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/).

@@ -132,50 +124,54 @@ Please see [the full list of all storage providers and their features](https://r

These backends adapt or modify other storage providers:

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
  files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
  identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
  bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
  equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
  over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

RELEASE.md:

@@ -4,51 +4,54 @@ This file describes how to make the various kinds of releases

## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
  releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If `make updatedirect` upgrades the version of go in the `go.mod`

```text
go 1.22.0
```

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

@@ -57,7 +60,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.

We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```sh
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

@@ -67,7 +70,7 @@ If the `go mod tidy` fails use the output from it to remove the

package which can't be upgraded from `/tmp/potential-upgrades` when
done

```sh
git co go.mod go.sum
```

@@ -77,12 +80,12 @@ Optionally upgrade the direct and indirect dependencies. This is very

likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.

- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with

@@ -99,7 +102,9 @@ The above procedure will not upgrade major versions, so v2 to v3.

However this tool can show which major versions might need to be
upgraded:

```sh
go run github.com/icholy/gomajor@latest list -major
```

Expect API breakage when updating major versions.

@@ -107,7 +112,9 @@ Expect API breakage when updating major versions.

At some point after the release run

```sh
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases ago, to remove old
beta binaries.

@@ -117,54 +124,64 @@ If rclone needs a point release due to some horrendous bug:

Set vars:

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
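
Concretely, for the first point release of the v1.52 series that would be:

```sh
BASE_TAG=v1.52            # the release series getting the fix
NEW_TAG=${BASE_TAG}.1     # the point release to make
echo $BASE_TAG $NEW_TAG   # prints: v1.52 v1.52.1
```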

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable
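
With `git co` expanded to `git checkout`, those two steps are:

```sh
git checkout -b ${BASE_TAG}-stable ${BASE_TAG}.0   # branch from the .0 release tag
make startstable
```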

Now:

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
  is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
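
The changelog cherry-pick at the end of that list, written out with the example
tags from above:

```sh
git checkout master
git checkout v1.52-stable docs/content/changelog.md   # stage the stable branch's changelog
git diff --cached docs/content/changelog.md           # check the diff is what you expect
git commit -a -v -m "Changelog updates from Version v1.52.1"
git push
```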

## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>,
which is a private repo containing artwork from sponsors.
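
One hypothetical way to do that checkout, assuming you have access to the
private repo:

```sh
git clone git@github.com:rclone/third-party-logos.git /tmp/third-party-logos
cp -R /tmp/third-party-logos/. docs/static/img/logos/
```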

## Update the website between releases

Create an update website branch based off the last release

```sh
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```sh
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```sh
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```sh
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.

@@ -172,14 +189,14 @@ Cherry pick any changes back to master and the stable branch if it is active.

To do a basic build of rclone's docker image to debug builds locally:

```sh
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build:

```sh
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

@@ -187,6 +204,6 @@ To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```sh
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

@@ -14,10 +14,12 @@ import (
	_ "github.com/rclone/rclone/backend/combine"
	_ "github.com/rclone/rclone/backend/compress"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/doi"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
	_ "github.com/rclone/rclone/backend/filelu"
	_ "github.com/rclone/rclone/backend/filescom"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/gofile"

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

@@ -51,6 +51,7 @@ import (
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"golang.org/x/sync/errgroup"
)

@@ -72,6 +73,7 @@ const (
	emulatorAccount      = "devstoreaccount1"
	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
	sasCopyValidity      = time.Hour // how long SAS should last when doing server side copy
)

var (

@@ -559,6 +561,11 @@ type Fs struct {
	pacer        *fs.Pacer                  // To pace and retry the API calls
	uploadToken  *pacer.TokenDispenser      // control concurrency
	publicAccess container.PublicAccessType // Container Public Access Level

	// user delegation cache
	userDelegationMu     sync.Mutex
	userDelegation       *service.UserDelegationCredential
	userDelegationExpiry time.Time
}

// Object describes an azure object

@@ -612,6 +619,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
	containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
	if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") {
		containerPath = containerPath[:len(containerPath)-1]
	}
	return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}

@@ -928,6 +938,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}
	case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
		// User with username and password
		//nolint:staticcheck // this is deprecated due to Azure policy
		options := azidentity.UsernamePasswordCredentialOptions{
			ClientOptions: policyClientOptions,
		}

@@ -980,6 +991,38 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}
	case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
		// Workload Identity based authentication
		var options azidentity.ManagedIdentityCredentialOptions
		options.ID = azidentity.ClientID(opt.MSIClientID)

		msiCred, err := azidentity.NewManagedIdentityCredential(&options)
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}

		getClientAssertions := func(context.Context) (string, error) {
			token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
				Scopes: []string{"api://AzureADTokenExchange"},
			})
			if err != nil {
				return "", fmt.Errorf("failed to acquire MSI token: %w", err)
			}
			return token.Token, nil
		}

		assertOpts := &azidentity.ClientAssertionCredentialOptions{}
		f.cred, err = azidentity.NewClientAssertionCredential(
			opt.Tenant,
			opt.ClientID,
			getClientAssertions,
			assertOpts)
		if err != nil {
			return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
		}
	case opt.UseAZ:
		var options = azidentity.AzureCLICredentialOptions{}
		f.cred, err = azidentity.NewAzureCLICredential(&options)

@@ -1213,7 +1256,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
			continue
		}
		// process directory markers as directories
		remote, _ = strings.CutSuffix(remote, "/")
	}
	remote = remote[len(prefix):]
	if addContainer {

@@ -1534,7 +1577,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
	remote, _ = strings.CutSuffix(remote, "/")
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""

@@ -1684,6 +1727,38 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.deleteContainer(ctx, container)
}

// Get a user delegation which is valid for at least sasCopyValidity
//
// This value is cached in f
func (f *Fs) getUserDelegation(ctx context.Context) (*service.UserDelegationCredential, error) {
	f.userDelegationMu.Lock()
	defer f.userDelegationMu.Unlock()

	if f.userDelegation != nil && time.Until(f.userDelegationExpiry) > sasCopyValidity {
		return f.userDelegation, nil
	}

	// Validity window
	start := time.Now().UTC()
	expiry := start.Add(2 * sasCopyValidity)
	startStr := start.Format(time.RFC3339)
	expiryStr := expiry.Format(time.RFC3339)

	// Acquire user delegation key from the service client
	info := service.KeyInfo{
		Start:  &startStr,
		Expiry: &expiryStr,
	}
	userDelegationKey, err := f.svc.GetUserDelegationCredential(ctx, info, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to get user delegation key: %w", err)
	}

	f.userDelegation = userDelegationKey
	f.userDelegationExpiry = expiry
	return f.userDelegation, nil
}

// getAuth gets auth to copy o.
//
// tokenOK is used to signal that token based auth (Microsoft Entra

@@ -1695,7 +1770,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// URL (not a SAS) and token will be empty.
//
// If tokenOK is true it may also return a token for the auth.
func (o *Object) getAuth(ctx context.Context, noAuth bool) (srcURL string, err error) {
	f := o.fs
	srcBlobSVC := o.getBlobSVC()
	srcURL = srcBlobSVC.URL()

@@ -1704,29 +1779,47 @@ func (o *Object) getAuth(ctx context.Context, tokenOK bool, noAuth bool) (srcURL
	case noAuth:
		// If same storage account then no auth needed
	case f.cred != nil:
		// Generate a User Delegation SAS URL using Azure AD credentials
		userDelegationKey, err := f.getUserDelegation(ctx)
		if err != nil {
			return "", fmt.Errorf("sas creation: %w", err)
		}

		// Build the SAS values
		perms := sas.BlobPermissions{Read: true}
		container, containerPath := o.split()
		start := time.Now().UTC()
		expiry := start.Add(sasCopyValidity)
		vals := sas.BlobSignatureValues{
			StartTime:     start,
			ExpiryTime:    expiry,
			Permissions:   perms.String(),
			ContainerName: container,
			BlobName:      containerPath,
		}

		// Sign with the delegation key
		queryParameters, err := vals.SignWithUserDelegation(userDelegationKey)
		if err != nil {
			return "", fmt.Errorf("signing SAS with user delegation failed: %w", err)
		}

		// Append the SAS to the URL
		srcURL = srcBlobSVC.URL() + "?" + queryParameters.Encode()
	case f.sharedKeyCred != nil:
		// Generate a short lived SAS URL if using shared key credentials
		expiry := time.Now().Add(sasCopyValidity)
		sasOptions := blob.GetSASURLOptions{}
		srcURL, err = srcBlobSVC.GetSASURL(sas.BlobPermissions{Read: true}, expiry, &sasOptions)
		if err != nil {
			return srcURL, fmt.Errorf("failed to create SAS URL: %w", err)
		}
	case f.anonymous || f.opt.SASURL != "":
		// If using a SASURL or anonymous, no need for any extra auth
	default:
		return srcURL, errors.New("unknown authentication type")
	}
	return srcURL, nil
}

// Do multipart parallel copy.

@@ -1747,7 +1840,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
	o.fs = f
	o.remote = remote

	srcURL, err := src.getAuth(ctx, false)
	if err != nil {
		return nil, fmt.Errorf("multipart copy: %w", err)
	}

@@ -1768,7 +1861,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
	var (
		srcSize  = src.size
		partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
		numParts = (srcSize + partSize - 1) / partSize
		blockIDs = make([]string, numParts) // list of blocks for finalize
		g, gCtx  = errgroup.WithContext(ctx)
		checker  = newCheckForInvalidBlockOrBlob("copy", o)

@@ -1791,7 +1884,8 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
			Count: partSize,
		},
		// Specifies the authorization scheme and signature for the copy source.
		// We use SAS URLs as this doesn't seem to work always
		// CopySourceAuthorization: token,
		// CPKInfo *blob.CPKInfo
		// CPKScopeInfo *blob.CPKScopeInfo
	}

@@ -1861,7 +1955,7 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
	dstBlobSVC := f.getBlobSVC(dstContainer, dstPath)

	// Get the source auth - none needed for same storage account
	srcURL, err := src.getAuth(ctx, f == src.fs)
	if err != nil {
		return nil, fmt.Errorf("single part copy: source auth: %w", err)
	}

@@ -2176,11 +2270,6 @@ func (o *Object) getTags() (tags map[string]string) {
// getBlobSVC creates a blob client
func (o *Object) getBlobSVC() *blob.Client {
	container, directory := o.split()
	return o.fs.getBlobSVC(container, directory)
}

@@ -2582,6 +2671,13 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
		return -1, err
	}

	// Only account after the checksum reads have been done
	if do, ok := reader.(pool.DelayAccountinger); ok {
		// To figure out this number, do a transfer and if the accounted size is 0 or a
		// multiple of what it should be, increase or decrease this number.
		do.DelayAccounting(2)
	}

	// Upload the block, with MD5 for check
	m := md5.New()
	currentChunkSize, err := io.Copy(m, reader)

@@ -2669,6 +2765,8 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
		blockList  blockblob.GetBlockListResponse
		properties *blob.GetPropertiesResponse
		options    *blockblob.CommitBlockListOptions
		// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
		pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
	)

	properties, err = o.readMetaDataAlways(ctx)

@@ -2680,7 +2778,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

	if objectExists {
		// Get the committed block list
		err = pacer.Call(func() (bool, error) {
			blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
			return o.fs.shouldRetry(ctx, err)
		})

@@ -2722,7 +2820,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

	// Commit only the committed blocks
	fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
	err = pacer.Call(func() (bool, error) {
		_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
		return o.fs.shouldRetry(ctx, err)
	})

@@ -2863,6 +2961,9 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
			return ui, err
		}
	}
	// if ui.isDirMarker && strings.HasSuffix(containerPath, "//") {
	// 	containerPath = containerPath[:len(containerPath)-1]
	// }

	// Update Mod time
	o.updateMetadataWithModTime(src.ModTime(ctx))

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm

package azureblob

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js && !wasm

package azureblob

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js || wasm

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

@@ -453,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
		return nil, fmt.Errorf("create new shared key credential failed: %w", err)
	}
	case opt.UseAZ:
		options := azidentity.AzureCLICredentialOptions{}
		cred, err = azidentity.NewAzureCLICredential(&options)
		fmt.Println(cred)
		if err != nil {

@@ -516,6 +516,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
	}
	case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
		// User with username and password
		//nolint:staticcheck // this is deprecated due to Azure policy
		options := azidentity.UsernamePasswordCredentialOptions{
			ClientOptions: policyClientOptions,
		}

@@ -549,7 +550,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
	case opt.UseMSI:
		// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
		// Validate and ensure exactly one is set. (To do: better validation.)
		b2i := map[bool]int{false: 0, true: 1}
		set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
		if set > 1 {
			return nil, errors.New("more than one user-assigned identity ID is set")

@@ -568,6 +569,37 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}
	case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
		// Workload Identity based authentication
		var options azidentity.ManagedIdentityCredentialOptions
		options.ID = azidentity.ClientID(opt.MSIClientID)

		msiCred, err := azidentity.NewManagedIdentityCredential(&options)
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}

		getClientAssertions := func(context.Context) (string, error) {
			token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
				Scopes: []string{"api://AzureADTokenExchange"},
			})
			if err != nil {
				return "", fmt.Errorf("failed to acquire MSI token: %w", err)
			}

			return token.Token, nil
		}

		assertOpts := &azidentity.ClientAssertionCredentialOptions{}
		cred, err = azidentity.NewClientAssertionCredential(
			opt.Tenant,
			opt.ClientID,
			getClientAssertions,
			assertOpts)
		if err != nil {
			return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
		}
	default:
		return nil, errors.New("no authentication method configured")
	}

@@ -822,7 +854,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
		return entries, err
	}

	opt := &directory.ListFilesAndDirectoriesOptions{
		Include: directory.ListFilesInclude{
			Timestamps: true,
		},

@@ -921,7 +953,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
	}
}

// getMetadata gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
	resp, err := o.fileClient().GetProperties(ctx, nil)
	if err != nil {

@@ -981,6 +1013,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
		SMBProperties: &file.SMBProperties{
			LastWriteTime: &t,
		},
		HTTPHeaders: &file.HTTPHeaders{
			ContentMD5:  o.md5,
			ContentType: &o.contentType,
		},
	}
	_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
	if err != nil {

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

package azurefiles

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

package azurefiles

@@ -1,7 +1,7 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || js || wasm

// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

@@ -1673,6 +1673,21 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
			return o.getMetaDataListing(ctx)
		}
	}

	// If using versionAt we need to list to find the correct version.
	if o.fs.opt.VersionAt.IsSet() {
		info, err := o.getMetaDataListing(ctx)
		if err != nil {
			return nil, err
		}
		if info.Action == "hide" {
			// Return object not found error if the current version is deleted.
			return nil, fs.ErrorObjectNotFound
		}
		return info, nil
	}

	_, info, err = o.getOrHead(ctx, "HEAD", nil)
	return info, err
}

@@ -1883,9 +1898,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
	// --b2-download-url cloudflare strips the Content-Length
	// headers (presumably so it can inject stuff) so use the old
	// length read from the listing.
	// Additionally, the official examples return S3 headers
	// instead of native, i.e. no file ID, use ones from listing.
	if info.Size < 0 {
		info.Size = o.size
	}
	if info.ID == "" {
		info.ID = o.id
	}
	return resp, info, nil
}

@@ -446,14 +446,14 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
			t.Run("List", func(t *testing.T) {
				fstest.CheckListing(t, f, test.want)
			})
			t.Run("NewObject", func(t *testing.T) {
				gotObj, gotErr := f.NewObject(ctx, fileName)
				assert.Equal(t, test.wantErr, gotErr)
				if gotErr == nil {
					assert.Equal(t, test.wantSize, gotObj.Size())
				}
			})
		})
	}
})

@@ -271,9 +271,9 @@ type User struct {
	ModifiedAt    time.Time `json:"modified_at"`
	Language      string    `json:"language"`
	Timezone      string    `json:"timezone"`
	SpaceAmount   float64   `json:"space_amount"`
	SpaceUsed     float64   `json:"space_used"`
	MaxUploadSize float64   `json:"max_upload_size"`
	Status        string    `json:"status"`
	JobTitle      string    `json:"job_title"`
	Phone         string    `json:"phone"`

backend/cache/cache.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

// Package cache implements a virtual provider to cache existing remotes.
package cache

backend/cache/cache_internal_test.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race

package cache_test

backend/cache/cache_test.go (vendored):

@@ -1,6 +1,6 @@
// Test Cache filesystem interface

//go:build !plan9 && !js && !wasm && !race

package cache_test

backend/cache/cache_unsupported.go (vendored):

@@ -1,7 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || js || wasm

// Package cache implements a virtual provider to cache existing remotes.
package cache

backend/cache/cache_upload_test.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race

package cache_test

backend/cache/directory.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

package cache

backend/cache/handle.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

package cache

backend/cache/object.go (vendored):

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm

package cache

backend/cache/plex.go (2 changes, vendored)
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js
+//go:build !plan9 && !js && !wasm

 package cache

backend/cache/storage_memory.go (2 changes, vendored)
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js
+//go:build !plan9 && !js && !wasm

 package cache

backend/cache/storage_persistent.go (2 changes, vendored)
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js
+//go:build !plan9 && !js && !wasm

 package cache

backend/cache/utils_test.go (3 changes, vendored)
@@ -1,5 +1,4 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
+//go:build !plan9 && !js && !wasm

 package cache

@@ -1861,6 +1861,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // baseMove chains to the wrapped Move or simulates it by Copy+Delete
 func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
+	ctx, ci := fs.AddConfig(ctx)
+	ci.NameTransform = nil // ensure operations.Move does not double-transform here
 	var (
 		dest fs.Object
 		err  error
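
For readers unfamiliar with the pattern in the two added lines: fs.AddConfig returns a child context carrying a private copy of the config, so the override is scoped to operations run under that context. A minimal sketch (not part of the diff):

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// AddConfig copies the config into a child context; clearing
	// NameTransform here does not affect the caller's configuration.
	ctx, ci := fs.AddConfig(context.Background())
	ci.NameTransform = nil
	fmt.Println(fs.GetConfig(ctx).NameTransform) // zero value: no transforms
}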

@@ -858,7 +858,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
 		}
 		return wrappedCallback(entries)
 	}
-	return listP(ctx, dir, wrappedCallback)
+	return listP(ctx, uRemote, wrappedCallback)
 }

 // ListR lists the objects and directories of the Fs starting

backend/doi/api/dataversetypes.go (new file, 38 lines)
// Type definitions specific to Dataverse

package api

// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
	Status string           `json:"status"`
	Data   DataverseDataset `json:"data"`
}

// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
	LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}

// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
	LastUpdateTime string          `json:"lastUpdateTime"`
	Files          []DataverseFile `json:"files"`
}

// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
	DirectoryLabel string            `json:"directoryLabel"`
	DataFile       DataverseDataFile `json:"dataFile"`
}

// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
	ID                 int64  `json:"id"`
	Filename           string `json:"filename"`
	ContentType        string `json:"contentType"`
	FileSize           int64  `json:"filesize"`
	OriginalFileFormat string `json:"originalFileFormat"`
	OriginalFileSize   int64  `json:"originalFileSize"`
	OriginalFileName   string `json:"originalFileName"`
	MD5                string `json:"md5"`
}

backend/doi/api/inveniotypes.go (new file, 33 lines)
// Type definitions specific to InvenioRDM

package api

// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
	Links InvenioRecordResponseLinks `json:"links"`
}

// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
	Self string `json:"self"`
}

// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
	Entries []InvenioFilesResponseEntry `json:"entries"`
}

// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
	Key      string                         `json:"key"`
	Checksum string                         `json:"checksum"`
	Size     int64                          `json:"size"`
	Updated  string                         `json:"updated"`
	MimeType string                         `json:"mimetype"`
	Links    InvenioFilesResponseEntryLinks `json:"links"`
}

// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
	Content string `json:"content"`
}

backend/doi/api/types.go (new file, 26 lines)
// Package api has general type definitions for doi
package api

// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
	ResponseCode int                        `json:"responseCode"`
	Handle       string                     `json:"handle"`
	Values       []DoiResolverResponseValue `json:"values"`
}

// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
	Index     int                          `json:"index"`
	Type      string                       `json:"type"`
	Data      DoiResolverResponseValueData `json:"data"`
	TTL       int                          `json:"ttl"`
	Timestamp string                       `json:"timestamp"`
}

// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
	Format string `json:"format"`
	Value  any    `json:"value"`
}
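
To make the shape concrete, here is a sketch of decoding a resolver reply into these types. The reply below is hand-written and trimmed; a real doi.org response typically carries additional handle values (such as HS_ADMIN records):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/doi/api"
)

// Hypothetical, trimmed reply from https://doi.org/api/handles/<doi>
const reply = `{
  "responseCode": 1,
  "handle": "10.1000/182",
  "values": [
    {"index": 1, "type": "URL",
     "data": {"format": "string", "value": "https://www.doi.org/hb.html"},
     "ttl": 86400, "timestamp": "2004-09-09T18:55:08Z"}
  ]
}`

func main() {
	var res api.DoiResolverResponse
	if err := json.Unmarshal([]byte(reply), &res); err != nil {
		panic(err)
	}
	// The resolved URL lives in the value typed "URL" with string data
	fmt.Println(res.Values[0].Data.Value) // https://www.doi.org/hb.html
}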

backend/doi/dataverse.go (new file, 112 lines)
// Implementation for Dataverse

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")
	return persistentID != ""
}

// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	queryValues := resolvedURL.Query()
	persistentID := queryValues.Get("persistentId")

	query := url.Values{}
	query.Add("persistentId", persistentID)
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})

	return Dataverse, endpointURL, nil
}
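
For intuition, the rewrite above turns a Dataverse landing-page URL into the dataset API endpoint while carrying the persistentId query along. A self-contained sketch; the DOI in the URL is made up:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	resolved, err := url.Parse("https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/EXAMPLE")
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Add("persistentId", resolved.Query().Get("persistentId"))
	// Only the path is replaced; host and query survive the rewrite
	endpoint := resolved.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: q.Encode()})
	fmt.Println(endpoint)
	// https://dataverse.harvard.edu/api/datasets/:persistentId/?persistentId=doi%3A10.7910%2FDVN%2FEXAMPLE
}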

// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := dp.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := dp.f.endpoint
	var res *http.Response
	var result api.DataverseDatasetResponse
	opts := rest.Opts{
		Method:     "GET",
		Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
		Parameters: filesURL.Query(),
	}
	err = dp.f.pacer.Call(func() (bool, error) {
		res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
	if modTimeErr != nil {
		fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
		modTime = timeUnset
	}
	for _, file := range result.Data.LatestVersion.Files {
		contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
		query := url.Values{}
		query.Add("format", "original")
		contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
		entry := &Object{
			fs:          dp.f,
			remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
			contentURL:  contentURL.String(),
			size:        file.DataFile.FileSize,
			modTime:     modTime,
			md5:         file.DataFile.MD5,
			contentType: file.DataFile.ContentType,
		}
		if file.DataFile.OriginalFileName != "" {
			entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
			entry.size = file.DataFile.OriginalFileSize
			entry.contentType = file.DataFile.OriginalFileFormat
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	dp.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newDataverseProvider(f *Fs) doiProvider {
	return &dataverseProvider{
		f: f,
	}
}

backend/doi/doi.go (new file, 649 lines)
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/cache"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	// the URL of the DOI resolver
	//
	// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
	doiResolverAPIURL = "https://doi.org/api"
	minSleep          = 10 * time.Millisecond
	maxSleep          = 2 * time.Second
	decayConstant     = 2 // bigger for slower decay, exponential
)

var (
	errorReadOnly = errors.New("doi remotes are read only")
	timeUnset     = time.Unix(0, 0)
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "doi",
		Description: "DOI datasets",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "doi",
			Help:     "The DOI or the doi.org URL.",
			Required: true,
		}, {
			Name: fs.ConfigProvider,
			Help: `DOI provider.

The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
			Examples: []fs.OptionExample{{
				Value: "auto",
				Help:  "Auto-detect provider",
			}, {
				Value: string(Zenodo),
				Help:  "Zenodo",
			}, {
				Value: string(Dataverse),
				Help:  "Dataverse",
			}, {
				Value: string(Invenio),
				Help:  "Invenio",
			}},
			Required: false,
			Advanced: true,
		}, {
			Name: "doi_resolver_api_url",
			Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
			Required: false,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}

// Provider defines the type of provider hosting the DOI
type Provider string

const (
	// Zenodo provider, see https://zenodo.org
	Zenodo Provider = "zenodo"
	// Dataverse provider, see https://dataverse.harvard.edu
	Dataverse Provider = "dataverse"
	// Invenio provider, see https://inveniordm.docs.cern.ch
	Invenio Provider = "invenio"
)

// Options defines the configuration for this backend
type Options struct {
	Doi               string `config:"doi"`                  // The DOI, a digital identifier of an object, usually a dataset
	Provider          string `config:"provider"`             // The DOI provider
	DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name        string         // name of this remote
	root        string         // the path we are working on
	provider    Provider       // the DOI provider
	doiProvider doiProvider    // the interface used to interact with the DOI provider
	features    *fs.Features   // optional features
	opt         Options        // options for this backend
	ci          *fs.ConfigInfo // global config
	endpoint    *url.URL       // the main API endpoint for this remote
	endpointURL string         // endpoint as a string
	srv         *rest.Client   // the connection to the server
	pacer       *fs.Pacer      // pacer for API calls
	cache       *cache.Cache   // a cache for the remote metadata
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // the remote path
	contentURL  string    // the URL where the contents of the file can be downloaded
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	contentType string    // content type of the object
	md5         string    // MD5 hash of the object content
}

// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
	// ListEntries returns the full list of entries found at the remote, regardless of root
	ListEntries(ctx context.Context) (entries []*Object, err error)
}

// Parse the input string as a DOI
// Examples:
//
//	10.1000/182                 -> 10.1000/182
//	https://doi.org/10.1000/182 -> 10.1000/182
//	doi:10.1000/182             -> 10.1000/182
func parseDoi(doi string) string {
	doiURL, err := url.Parse(doi)
	if err != nil {
		return doi
	}
	if doiURL.Scheme == "doi" {
		return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
	}
	if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
		return strings.TrimLeft(doiURL.Path, "/")
	}
	return doi
}

// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
	resolverURL := opt.DoiResolverAPIURL
	if resolverURL == "" {
		resolverURL = doiResolverAPIURL
	}

	var result api.DoiResolverResponse
	params := url.Values{}
	params.Add("index", "1")
	opts := rest.Opts{
		Method:     "GET",
		RootURL:    resolverURL,
		Path:       "/handles/" + opt.Doi,
		Parameters: params,
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}

	if result.ResponseCode != 1 {
		return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
	}
	resolvedURLStr := ""
	for _, value := range result.Values {
		if value.Type == "URL" && value.Data.Format == "string" {
			valueStr, ok := value.Data.Value.(string)
			if !ok {
				return nil, errors.New("could not resolve DOI (incorrect response format)")
			}
			resolvedURLStr = valueStr
		}
	}
	resolvedURL, err := url.Parse(resolvedURLStr)
	if err != nil {
		return nil, err
	}
	return resolvedURL, nil
}

// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
	resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
	if err != nil {
		return "", nil, err
	}

	switch opt.Provider {
	case string(Dataverse):
		return resolveDataverseEndpoint(resolvedURL)
	case string(Invenio):
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	case string(Zenodo):
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}

	hostname := strings.ToLower(resolvedURL.Hostname())
	if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
		return resolveDataverseEndpoint(resolvedURL)
	}
	if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
		return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
	}
	if activateInvenio(ctx, srv, pacer, resolvedURL) {
		return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	}

	return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}

// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
	provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
	if err != nil {
		return false, err
	}

	// Update f with the new parameters
	f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
	f.endpoint = endpoint
	f.endpointURL = endpoint.String()
	f.provider = provider
	f.opt.Provider = string(provider)

	switch f.provider {
	case Dataverse:
		f.doiProvider = newDataverseProvider(f)
	case Invenio, Zenodo:
		f.doiProvider = newInvenioProvider(f)
	default:
		return false, fmt.Errorf("provider type '%s' not supported", f.provider)
	}

	// Determine if the root is a file
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return false, err
	}
	for _, entry := range entries {
		if entry.remote == f.root {
			isFile = true
			break
		}
	}
	return isFile, nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = strings.Trim(root, "/")

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	opt.Doi = parseDoi(opt.Doi)

	client := fshttp.NewClient(ctx)
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		ci:    ci,
		srv:   rest.NewClient(client),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		cache: cache.New(),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	isFile, err := f.httpConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if isFile {
		// return an error with an fs which points to the parent
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.root = newRoot
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
	return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
	return fmt.Sprintf("DOI %s", f.opt.Doi)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Hashes returns hash.MD5: the providers report MD5 checksums for their files
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	entries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, err
	}

	remoteFullPath := remote
	if f.root != "" {
		remoteFullPath = path.Join(f.root, remote)
	}

	for _, entry := range entries {
		if entry.Remote() == remoteFullPath {
			return entry, nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fileEntries, err := f.doiProvider.ListEntries(ctx)
	if err != nil {
		return nil, fmt.Errorf("error listing %q: %w", dir, err)
	}

	fullDir := path.Join(f.root, dir)
	if fullDir != "" {
		fullDir += "/"
	}

	dirPaths := map[string]bool{}
	for _, entry := range fileEntries {
		// First, filter out files not in `fullDir`
		if !strings.HasPrefix(entry.remote, fullDir) {
			continue
		}
		// Then, find entries in subfolders
		remotePath := entry.remote
		if fullDir != "" {
			remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
		}
		parts := strings.SplitN(remotePath, "/", 2)
		if len(parts) == 1 {
			newEntry := *entry
			newEntry.remote = path.Join(dir, remotePath)
			entries = append(entries, &newEntry)
		} else {
			dirPaths[path.Join(dir, parts[0])] = true
		}
	}

	for dirPath := range dirPaths {
		entry := fs.NewDir(dirPath, time.Time{})
		entries = append(entries, entry)
	}

	return entries, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 hash of the object content, as reported by the provider
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5, nil
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
	return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.contentURL,
		Options: options,
	}
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Handle non-compliant redirects
	if res.Header.Get("Location") != "" {
		newURL, err := res.Location()
		if err == nil {
			opts.RootURL = newURL.String()
			err = o.fs.pacer.Call(func() (bool, error) {
				res, err = o.fs.srv.Call(ctx, &opts)
				return shouldRetry(ctx, res, err)
			})
			if err != nil {
				return nil, fmt.Errorf("Open failed: %w", err)
			}
		}
	}

	return res.Body, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "metadata",
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with some information about the DOI.

    rclone backend metadata doi:

It returns a JSON object representing metadata about the DOI.
`,
}, {
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage Examples:

    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.
`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "metadata":
		return f.ShowMetadata(ctx)
	case "set":
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
	if err != nil {
		return nil, err
	}

	info := map[string]any{}
	info["DOI"] = f.opt.Doi
	info["URL"] = doiURL.String()
	info["metadataURL"] = f.endpointURL
	info["provider"] = f.provider
	return info, nil
}
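
For illustration, running the metadata command against the Zenodo DOI used in the tests below would produce a JSON object built from the four keys ShowMetadata sets. The values here are indicative and the exact formatting depends on how rclone renders command output:

    rclone backend metadata doi:
    {
        "DOI": "10.5281/zenodo.2600782",
        "URL": "https://doi.org/10.5281/zenodo.2600782",
        "metadataURL": "https://zenodo.org/api/records/2600782",
        "provider": "zenodo"
    }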

// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Commander   = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)

backend/doi/doi_internal_test.go (new file, 260 lines)
package doi

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/hash"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var remoteName = "TestDoi"

func TestParseDoi(t *testing.T) {
	// 10.1000/182 -> 10.1000/182
	doi := "10.1000/182"
	parsed := parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://doi.org/10.1000/182 -> 10.1000/182
	doi = "https://doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://dx.doi.org/10.1000/182 -> 10.1000/182
	doi = "https://dx.doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi:10.1000/182 -> 10.1000/182
	doi = "doi:10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi://10.1000/182 -> 10.1000/182
	doi = "doi://10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)
}

// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
	mux := http.NewServeMux()

	// Handle requests for resolving DOIs
	mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are resolving a DOI
		handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
		assert.NotEmpty(t, handle)
		index := r.URL.Query().Get("index")
		assert.Equal(t, "1", index)

		// Return the most basic response
		result := api.DoiResolverResponse{
			ResponseCode: 1,
			Handle:       handle,
			Values: []api.DoiResolverResponseValue{
				{
					Index: 1,
					Type:  "URL",
					Data: api.DoiResolverResponseValueData{
						Format: "string",
						Value:  resolvedURL,
					},
				},
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts.URL + "/api"
}

func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
	mux := http.NewServeMux()

	// Handle requests for a single record
	mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning data about a single record
		recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
		assert.NotEmpty(t, recordID)

		// Return the most basic response
		selfURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		selfURL = selfURL.JoinPath(r.URL.String())
		result := api.InvenioRecordResponse{
			Links: api.InvenioRecordResponseLinks{
				Self: selfURL.String(),
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for listing files in a record
	mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
		// Return the most basic response
		filesBaseURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		filesBaseURL = filesBaseURL.JoinPath("/api/files/")

		entries := []api.InvenioFilesResponseEntry{}
		for filename, contents := range files {
			entries = append(entries,
				api.InvenioFilesResponseEntry{
					Key:      filename,
					Checksum: md5Sum(contents),
					Size:     int64(len(contents)),
					Updated:  time.Now().UTC().Format(time.RFC3339),
					MimeType: "text/plain; charset=utf-8",
					Links: api.InvenioFilesResponseEntryLinks{
						Content: filesBaseURL.JoinPath(filename).String(),
					},
				},
			)
		}

		result := api.InvenioFilesResponse{
			Entries: entries,
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for file contents
	mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning the contents of a file
		filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
		assert.NotEmpty(t, filename)
		contents, found := files[filename]
		if !found {
			w.WriteHeader(404)
			return
		}

		// Return the most basic response
		_, err := w.Write([]byte(contents))
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts
}

func TestZenodoRemote(t *testing.T) {
	recordID := "2600782"
	doi := "10.5281/zenodo.2600782"

	// The files in the dataset
	files := map[string]string{
		"README.md": "This is a dataset.",
		"data.txt":  "Some data",
	}

	ts := prepareMockZenodoServer(t, files)
	resolvedURL := ts.URL + "/record/" + recordID

	doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)

	testConfig := configmap.Simple{
		"type":                 "doi",
		"doi":                  doi,
		"provider":             "zenodo",
		"doi_resolver_api_url": doiResolverAPIURL,
	}
	f, err := NewFs(context.Background(), remoteName, "", testConfig)
	require.NoError(t, err)

	// Test listing the DOI files
	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)

	require.Equal(t, len(files), len(entries))

	e := entries[0]
	assert.Equal(t, "README.md", e.Remote())
	assert.Equal(t, int64(18), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)

	e = entries[1]
	assert.Equal(t, "data.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

	// Test reading the DOI files
	o, err := f.NewObject(context.Background(), "README.md")
	require.NoError(t, err)
	assert.Equal(t, int64(18), o.Size())
	md5Hash, err := o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["README.md"]), data)
	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))

	o, err = f.NewObject(context.Background(), "data.txt")
	require.NoError(t, err)
	assert.Equal(t, int64(9), o.Size())
	md5Hash, err = o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
	fd, err = o.Open(context.Background())
	require.NoError(t, err)
	data, err = io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["data.txt"]), data)
	do, ok = o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
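
Both tests above are hermetic: the DOI resolver and the Zenodo API are served by per-test httptest servers, so `go test ./backend/doi -run 'TestParseDoi|TestZenodoRemote' -v` runs offline without touching doi.org or zenodo.org.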

backend/doi/doi_test.go (new file, 16 lines)
// Test DOI filesystem interface
package doi

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDoi:",
		NilObject:  (*Object)(nil),
	})
}

backend/doi/invenio.go (new file, 164 lines)
// Implementation for InvenioRDM

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)

// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
	_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	return err == nil
}

// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	var res *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err = srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	// First, attempt to grab the API URL from the headers
	var linksetURL *url.URL
	links := parseLinkHeader(res.Header.Get("Link"))
	for _, link := range links {
		if link.Rel == "linkset" && link.Type == "application/linkset+json" {
			parsed, err := url.Parse(link.Href)
			if err == nil {
				linksetURL = parsed
				break
			}
		}
	}

	if linksetURL != nil {
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "using linkset URL failed: %s", err.Error())
	}

	// If there is no linkset header, try to grab the record ID from the URL
	recordID := ""
	resURL := res.Request.URL
	match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
	if match != nil {
		recordID = match[1]
		guessedURL := res.Request.URL.ResolveReference(&url.URL{
			Path: "/api/records/" + recordID,
		})
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "guessing the URL failed: %s", err.Error())
	}

	return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	if result.Links.Self == "" {
		return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
	}
	return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := ip.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := ip.f.endpoint.JoinPath("files")
	var result api.InvenioFilesResponse
	opts := rest.Opts{
		Method: "GET",
		Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
	}
	err = ip.f.pacer.Call(func() (bool, error) {
		res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	for _, file := range result.Entries {
		modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
		if modTimeErr != nil {
			fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
			modTime = timeUnset
		}
		entry := &Object{
			fs:          ip.f,
			remote:      file.Key,
			contentURL:  file.Links.Content,
			size:        file.Size,
			modTime:     modTime,
			contentType: file.MimeType,
			md5:         strings.TrimPrefix(file.Checksum, "md5:"),
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	ip.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newInvenioProvider(f *Fs) doiProvider {
	return &invenioProvider{
		f: f,
	}
}

backend/doi/link_header.go (new file, 75 lines)
package doi

import (
	"regexp"
	"strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}

func parseLinkHeader(header string) (links []headerLink) {
	for _, link := range strings.Split(header, ",") {
		link = strings.TrimSpace(link)
		parsed := parseLink(link)
		if parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}

func parseLink(link string) (parsedLink *headerLink) {
	var parts []string
	for _, part := range strings.Split(link, ";") {
		parts = append(parts, strings.TrimSpace(part))
	}

	match := linkRegex.FindStringSubmatch(parts[0])
	if match == nil {
		return nil
	}

	result := &headerLink{
		Href:   match[1],
		Extras: map[string]string{},
	}

	for _, keyValue := range parts[1:] {
		parsed := parseKeyValue(keyValue)
		if parsed != nil {
			key, value := parsed[0], parsed[1]
			switch strings.ToLower(key) {
			case "rel":
				result.Rel = value
			case "type":
				result.Type = value
			default:
				result.Extras[key] = value
			}
		}
	}
	return result
}

func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	match := valueRegex.FindStringSubmatch(parts[1])
	if match != nil {
		parts[1] = match[1]
		return parts
	}
	return parts
}

backend/doi/link_header_internal_test.go (new file, 44 lines)
package doi

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
	header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
	links := parseLinkHeader(header)
	expected := headerLink{
		Href:   "https://zenodo.org/api/records/15063252",
		Rel:    "linkset",
		Type:   "application/linkset+json",
		Extras: map[string]string{},
	}
	assert.Contains(t, links, expected)

	header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
	links = parseLinkHeader(header)
	expectedList := []headerLink{{
		Href:   "https://api.example.com/issues?page=2",
		Rel:    "prev",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=4",
		Rel:    "next",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=10",
		Rel:    "last",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=1",
		Rel:    "first",
		Type:   "",
		Extras: map[string]string{},
	}}
	assert.Equal(t, links, expectedList)
}

backend/doi/zenodo.go (new file, 47 lines)
// Implementation for Zenodo

package doi

import (
	"context"
	"fmt"
	"net/url"
	"regexp"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match == nil {
		return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
	}

	recordID := match[1]
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpointURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	endpointURL, err = url.Parse(result.Links.Self)
	if err != nil {
		return "", nil, err
	}

	return Zenodo, endpointURL, nil
}
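
For intuition, the record ID falls straight out of the DOI via the regex above. A self-contained sketch using the DOI from the tests earlier:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as zenodoRecordRegex: the capture group is
	// everything after "zenodo." in the DOI suffix.
	re := regexp.MustCompile(`zenodo[.](.+)`)
	m := re.FindStringSubmatch("10.5281/zenodo.2600782")
	fmt.Println(m[1]) // 2600782, later expanded to /api/records/2600782
}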

@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
 	}
 	var updateMetadata updateMetadataFn
 	if len(metadata) > 0 {
-		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
+		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
 		if err != nil {
 			return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
 		}
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
 	}
 	dirID = actualID(dirID)
 	updateInfo := &drive.File{}
-	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
+	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
 	if err != nil {
 		return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
 	}

@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
 //
 // It returns a callback which should be called to finish the updates
 // after the data is uploaded.
-func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
+func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
 	callbackFns := []updateMetadataFn{}
 	callback = func(ctx context.Context, info *drive.File) error {
 		for _, fn := range callbackFns {
@@ -532,7 +532,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		switch k {
 		case "copy-requires-writer-permission":
-			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
+			if isFolder {
+				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
+			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
 				return nil, err
 			}
 		case "writers-can-share":
@@ -629,7 +631,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
 	if err != nil {
 		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
 	}
-	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
+	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
 	if err != nil {
 		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
 	}

@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		}
 	}
 	usage = &fs.Usage{
-		Total: fs.NewUsageValue(int64(total)),        // quota of bytes that can be used
-		Used:  fs.NewUsageValue(int64(used)),         // bytes in use
-		Free:  fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
+		Total: fs.NewUsageValue(total),        // quota of bytes that can be used
+		Used:  fs.NewUsageValue(used),         // bytes in use
+		Free:  fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
 	}
 	return usage, nil
 }
81
backend/filelu/api/types.go
Normal file
81
backend/filelu/api/types.go
Normal file
@@ -0,0 +1,81 @@
// Package api defines types for interacting with the FileLu API.
package api

import "encoding/json"

// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        FldID interface{} `json:"fld_id"`
    } `json:"result"`
}

// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
}

// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        Files []struct {
            Name     string      `json:"name"`
            FldID    json.Number `json:"fld_id"`
            Path     string      `json:"path"`
            FileCode string      `json:"file_code"`
            Size     int64       `json:"size"`
        } `json:"files"`
        Folders []struct {
            Name  string      `json:"name"`
            FldID json.Number `json:"fld_id"`
            Path  string      `json:"path"`
        } `json:"folders"`
    } `json:"result"`
}

// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        URL  string `json:"url"`
        Size int64  `json:"size"`
    } `json:"result"`
}

// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result []struct {
        Size     string `json:"size"`
        Name     string `json:"name"`
        FileCode string `json:"filecode"`
        Hash     string `json:"hash"`
        Status   int    `json:"status"`
    } `json:"result"`
}

// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
}

// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
    Status int    `json:"status"` // HTTP status code of the response.
    Msg    string `json:"msg"`    // Message describing the response.
    Result struct {
        PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
        Email         string `json:"email"`          // User's email address.
        UType         string `json:"utype"`          // User type (e.g., premium or free).
        Storage       string `json:"storage"`        // Total storage available to the user.
        StorageUsed   string `json:"storage_used"`   // Amount of storage used.
    } `json:"result"` // Nested result structure containing account details.
}
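A note on the FldID fields above: the mix of interface{} and json.Number suggests the API is not consistent about whether fld_id arrives as a bare number or a quoted string. json.Number tolerates both, which a minimal sketch (invented payloads) demonstrates:

package main

import (
    "encoding/json"
    "fmt"
)

type folder struct {
    FldID json.Number `json:"fld_id"`
}

func main() {
    // json.Number decodes both a bare number and a numeric string.
    for _, payload := range []string{`{"fld_id": 123}`, `{"fld_id": "123"}`} {
        var f folder
        if err := json.Unmarshal([]byte(payload), &f); err != nil {
            fmt.Println("decode error:", err)
            continue
        }
        fmt.Println(f.FldID.String()) // "123" in both cases
    }
}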
366 backend/filelu/filelu.go Normal file
@@ -0,0 +1,366 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "os"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

// Register the backend with Rclone
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "filelu",
        Description: "FileLu Cloud Storage",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:      "key",
            Help:      "Your FileLu Rclone key from My Account",
            Required:  true,
            Sensitive: true,
        },
            {
                Name:     config.ConfigEncoding,
                Help:     config.ConfigEncodingHelp,
                Advanced: true,
                Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
                    encoder.EncodeSlash |
                    encoder.EncodeLtGt |
                    encoder.EncodeExclamation |
                    encoder.EncodeDoubleQuote |
                    encoder.EncodeSingleQuote |
                    encoder.EncodeBackQuote |
                    encoder.EncodeQuestion |
                    encoder.EncodeDollar |
                    encoder.EncodeColon |
                    encoder.EncodeAsterisk |
                    encoder.EncodePipe |
                    encoder.EncodeHash |
                    encoder.EncodePercent |
                    encoder.EncodeBackSlash |
                    encoder.EncodeCrLf |
                    encoder.EncodeDel |
                    encoder.EncodeCtl |
                    encoder.EncodeLeftSpace |
                    encoder.EncodeLeftPeriod |
                    encoder.EncodeLeftTilde |
                    encoder.EncodeLeftCrLfHtVt |
                    encoder.EncodeRightPeriod |
                    encoder.EncodeRightCrLfHtVt |
                    encoder.EncodeSquareBracket |
                    encoder.EncodeSemicolon |
                    encoder.EncodeRightSpace |
                    encoder.EncodeInvalidUtf8 |
                    encoder.EncodeDot),
            },
        }})
}

// Options defines the configuration for the FileLu backend
type Options struct {
    Key string               `config:"key"`
    Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents the FileLu file system
type Fs struct {
    name       string
    root       string
    opt        Options
    features   *fs.Features
    endpoint   string
    pacer      *pacer.Pacer
    srv        *rest.Client
    client     *http.Client
    targetFile string
}

// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, fmt.Errorf("failed to parse config: %w", err)
    }

    if opt.Key == "" {
        return nil, fmt.Errorf("FileLu Rclone Key is required")
    }

    client := fshttp.NewClient(ctx)

    if strings.TrimSpace(root) == "" {
        root = ""
    }
    root = strings.Trim(root, "/")

    filename := ""

    f := &Fs{
        name:       name,
        opt:        *opt,
        endpoint:   "https://filelu.com/rclone",
        client:     client,
        srv:        rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
        pacer:      pacer.New(),
        targetFile: filename,
        root:       root,
    }

    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
        WriteMetadata:           false,
        SlowHash:                true,
    }).Fill(ctx, f)

    rootContainer, rootDirectory := rootSplit(f.root)
    if rootContainer != "" && rootDirectory != "" {
        // Check to see if the (container,directory) is actually an existing file
        oldRoot := f.root
        newRoot, leaf := path.Split(oldRoot)
        f.root = strings.Trim(newRoot, "/")
        _, err := f.NewObject(ctx, leaf)
        if err != nil {
            if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
                // File doesn't exist or is a directory so return old f
                f.root = strings.Trim(oldRoot, "/")
                return f, nil
            }
            return nil, err
        }
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }

    return f, nil
}

// Mkdir creates a directory on the remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    fullPath := path.Clean(f.root + "/" + dir)
    _, err := f.createFolder(ctx, fullPath)
    return err
}

// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    accountInfo, err := f.getAccountInfo(ctx)
    if err != nil {
        return nil, err
    }

    totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
    if err != nil {
        return nil, fmt.Errorf("failed to parse total storage: %w", err)
    }

    usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
    if err != nil {
        return nil, fmt.Errorf("failed to parse used storage: %w", err)
    }

    return &fs.Usage{
        Total: fs.NewUsageValue(totalStorage), // Total bytes available
        Used:  fs.NewUsageValue(usedStorage),  // Total bytes used
        Free:  fs.NewUsageValue(totalStorage - usedStorage),
    }, nil
}

// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
    fullPath := path.Join(f.root, dir)
    if fullPath != "" {
        fullPath = "/" + strings.Trim(fullPath, "/")
    }
    return f.deleteFolder(ctx, fullPath)
}

// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
    // Compose full path for API call
    fullPath := path.Join(f.root, dir)
    fullPath = "/" + strings.Trim(fullPath, "/")
    if fullPath == "/" {
        fullPath = ""
    }

    var entries fs.DirEntries
    result, err := f.getFolderList(ctx, fullPath)
    if err != nil {
        return nil, err
    }

    fldMap := map[string]bool{}
    for _, folder := range result.Result.Folders {
        fldMap[folder.FldID.String()] = true
        if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
            continue
        }

        paths := strings.Split(folder.Path, fullPath+"/")
        remote := paths[0]
        if len(paths) > 1 {
            remote = paths[1]
        }

        if strings.Contains(remote, "/") {
            continue
        }

        pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
        remotePathWithoutRoot := pathsWithoutRoot[0]
        if len(pathsWithoutRoot) > 1 {
            remotePathWithoutRoot = pathsWithoutRoot[1]
        }
        remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
        entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
    }
    for _, file := range result.Result.Files {
        if _, ok := fldMap[file.FldID.String()]; ok {
            continue
        }
        remote := path.Join(dir, file.Name)
        // trim leading slashes
        remote = strings.TrimPrefix(remote, "/")
        obj := &Object{
            fs:      f,
            remote:  remote,
            size:    file.Size,
            modTime: time.Now(),
        }
        entries = append(entries, obj)
    }
    return entries, nil
}

// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if src.Size() == 0 {
        return nil, fs.ErrorCantUploadEmptyFiles
    }

    err := f.uploadFile(ctx, in, src.Remote())
    if err != nil {
        return nil, err
    }

    newObject := &Object{
        fs:      f,
        remote:  src.Remote(),
        size:    src.Size(),
        modTime: src.ModTime(ctx),
    }
    fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
    return newObject, nil
}

// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
    if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
        dir := path.Dir(destinationPath)
        if err := os.MkdirAll(dir, 0755); err != nil {
            return nil, fmt.Errorf("failed to create destination directory: %w", err)
        }

        reader, err := src.Open(ctx)
        if err != nil {
            return nil, fmt.Errorf("failed to open source file: %w", err)
        }
        defer func() {
            if err := reader.Close(); err != nil {
                fs.Logf(nil, "Failed to close file body: %v", err)
            }
        }()

        dest, err := os.Create(destinationPath)
        if err != nil {
            return nil, fmt.Errorf("failed to create destination file: %w", err)
        }
        defer func() {
            if err := dest.Close(); err != nil {
                fs.Logf(nil, "Failed to close file body: %v", err)
            }
        }()

        if _, err := io.Copy(dest, reader); err != nil {
            return nil, fmt.Errorf("failed to copy file content: %w", err)
        }

        if err := src.Remove(ctx); err != nil {
            return nil, fmt.Errorf("failed to remove source file: %w", err)
        }

        return nil, nil
    }

    reader, err := src.Open(ctx)
    if err != nil {
        return nil, fmt.Errorf("failed to open source object: %w", err)
    }
    defer func() {
        if err := reader.Close(); err != nil {
            fs.Logf(nil, "Failed to close file body: %v", err)
        }
    }()

    err = f.uploadFile(ctx, reader, destinationPath)
    if err != nil {
        return nil, fmt.Errorf("failed to upload file to destination: %w", err)
    }

    if err := src.Remove(ctx); err != nil {
        return nil, fmt.Errorf("failed to delete source file: %w", err)
    }

    return &Object{
        fs:      f,
        remote:  destinationPath,
        size:    src.Size(),
        modTime: src.ModTime(ctx),
    }, nil
}

// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    fullPath := path.Join(f.root, dir)
    if fullPath != "" {
        fullPath = "/" + strings.Trim(fullPath, "/")
    }

    // Step 1: Check if folder is empty
    listResp, err := f.getFolderList(ctx, fullPath)
    if err != nil {
        return err
    }
    if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
        return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
    }

    // Step 2: Delete the folder
    return f.deleteFolder(ctx, fullPath)
}

// Check the interfaces are satisfied
var (
    _ fs.Fs      = (*Fs)(nil)
    _ fs.Purger  = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
    _ fs.Mover   = (*Fs)(nil)
    _ fs.Object  = (*Object)(nil)
)
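A small sketch of the root-handling convention in NewFs above: when the configured root turns out to name a file, the backend re-roots at the parent directory and returns fs.ErrorIsFile. The splitting step in isolation (standalone, with an invented path):

package main

import (
    "fmt"
    "path"
    "strings"
)

func splitRoot(root string) (parent, leaf string) {
    // Same split NewFs performs before probing the leaf as an object.
    parent, leaf = path.Split(root)
    return strings.Trim(parent, "/"), leaf
}

func main() {
    parent, leaf := splitRoot("backup/photos/img.jpg")
    fmt.Println(parent, leaf) // backup/photos img.jpg
}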
324 backend/filelu/filelu_client.go Normal file
@@ -0,0 +1,324 @@
package filelu

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"

    "github.com/rclone/rclone/backend/filelu/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/rest"
)

// createFolder creates a folder at the specified path.
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
    encodedDir := f.fromStandardPath(dirPath)
    apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(encodedDir),
        url.QueryEscape(f.opt.Key),
    )

    req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }

    var resp *http.Response
    result := api.CreateFolderResponse{}
    err = f.pacer.Call(func() (bool, error) {
        var innerErr error
        resp, innerErr = f.client.Do(req)
        return fserrors.ShouldRetry(innerErr), innerErr
    })
    if err != nil {
        return nil, fmt.Errorf("request failed: %w", err)
    }
    defer func() {
        if err := resp.Body.Close(); err != nil {
            fs.Logf(nil, "Failed to close response body: %v", err)
        }
    }()

    err = json.NewDecoder(resp.Body).Decode(&result)
    if err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }
    if result.Status != 200 {
        return nil, fmt.Errorf("error: %s", result.Msg)
    }

    fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
    return &result, nil
}

// getFolderList lists both files and folders in a directory.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
    encodedDir := f.fromStandardPath(path)
    apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(encodedDir),
        url.QueryEscape(f.opt.Key),
    )

    var body []byte
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err = io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("error reading response body: %w", err)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return nil, err
    }

    var response api.FolderListResponse
    if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }
    if response.Status != 200 {
        if strings.Contains(response.Msg, "Folder not found") {
            return nil, fs.ErrorDirNotFound
        }
        return nil, fmt.Errorf("API error: %s", response.Msg)
    }

    for index := range response.Result.Folders {
        response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
    }

    for index := range response.Result.Files {
        response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
    }

    return &response, nil
}

// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
    fullPath = f.fromStandardPath(fullPath)
    deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(fullPath),
        url.QueryEscape(f.opt.Key),
    )

    delResp := api.DeleteFolderResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
        if err != nil {
            return false, err
        }
        resp, err := f.client.Do(req)
        if err != nil {
            return fserrors.ShouldRetry(err), err
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            return false, err
        }

        if err := json.Unmarshal(body, &delResp); err != nil {
            return false, fmt.Errorf("error decoding delete response: %w", err)
        }
        if delResp.Status != 200 {
            return false, fmt.Errorf("delete error: %s", delResp.Msg)
        }
        return false, nil
    })
    if err != nil {
        return err
    }

    fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
    return nil
}

// getDirectLink fetches a direct download link (and size) for a file from FileLu.
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
    filePath = f.fromStandardPath(filePath)
    apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(filePath),
        url.QueryEscape(f.opt.Key),
    )

    result := api.FileDirectLinkResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return "", 0, err
    }

    return result.Result.URL, result.Result.Size, nil
}

// deleteFile deletes a file based on filePath
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
    filePath = f.fromStandardPath(filePath)
    apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(filePath),
        url.QueryEscape(f.opt.Key),
    )

    result := api.DeleteFileResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to delete file: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    return err
}

// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
    opts := rest.Opts{
        Method: "GET",
        Path:   "/account/info",
        Parameters: url.Values{
            "key": {f.opt.Key},
        },
    }

    var result api.AccountInfoResponse
    err := f.pacer.Call(func() (bool, error) {
        _, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
        return fserrors.ShouldRetry(callErr), callErr
    })

    if err != nil {
        return nil, err
    }

    if result.Status != 200 {
        return nil, fmt.Errorf("error: %s", result.Msg)
    }

    return &result, nil
}

// getFileInfo retrieves file information based on file code
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
    u, _ := url.Parse(f.endpoint + "/file/info2")
    q := u.Query()
    q.Set("file_code", fileCode) // escaped by q.Encode() below
    q.Set("key", f.opt.Key)
    u.RawQuery = q.Encode()

    apiURL := u.String()

    var body []byte
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err = io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("error reading response body: %w", err)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return nil, err
    }
    result := api.FileInfoResponse{}

    if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }

    if result.Status != 200 || len(result.Result) == 0 {
        return nil, fs.ErrorObjectNotFound
    }

    return &result, nil
}
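Every client call above funnels through the same pacer idiom: the closure returns (retry, err), and the pacer re-invokes it while retry is true. A minimal sketch of that contract using the same pacer.New constructor the backend uses (the failing first attempt is simulated):

package main

import (
    "errors"
    "fmt"

    "github.com/rclone/rclone/lib/pacer"
)

func main() {
    p := pacer.New()
    attempts := 0
    err := p.Call(func() (bool, error) {
        attempts++
        if attempts < 2 {
            // e.g. shouldRetryHTTP(resp.StatusCode) reported a 429
            return true, errors.New("HTTP 429: rate limited")
        }
        return false, nil // success: stop retrying
    })
    fmt.Println(attempts, err) // 2 <nil>, within the pacer's default retry budget
}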
193 backend/filelu/filelu_file_uploader.go Normal file
@@ -0,0 +1,193 @@
package filelu

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "net/url"
    "path"
    "strings"

    "github.com/rclone/rclone/fs"
)

// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
    directory := path.Dir(fileFullPath)
    fileName := path.Base(fileFullPath)
    if directory == "." {
        directory = ""
    }
    destinationFolderPath := path.Join(f.root, directory)
    if destinationFolderPath != "" {
        destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
    }

    existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
    if err != nil {
        if errors.Is(err, fs.ErrorDirNotFound) {
            err = f.Mkdir(ctx, path.Dir(fileFullPath))
            if err != nil {
                return fmt.Errorf("failed to create directory: %w", err)
            }
        } else {
            return fmt.Errorf("failed to list existing files: %w", err)
        }
    }

    for _, entry := range existingEntries {
        if entry.Remote() == fileFullPath {
            _, ok := entry.(fs.Object)
            if !ok {
                continue
            }

            // If the file exists but is different, remove it
            filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
            err = f.deleteFile(ctx, filePath)
            if err != nil {
                return fmt.Errorf("failed to delete existing file: %w", err)
            }
        }
    }

    uploadURL, sessID, err := f.getUploadServer(ctx)
    if err != nil {
        return fmt.Errorf("failed to retrieve upload server: %w", err)
    }

    // Since the fileCode isn't used, just handle the error
    if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
        return fmt.Errorf("failed to upload file: %w", err)
    }

    return nil
}

// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
    apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))

    var result struct {
        Status int    `json:"status"`
        SessID string `json:"sess_id"`
        Result string `json:"result"`
        Msg    string `json:"msg"`
    }

    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })

    if err != nil {
        return "", "", err
    }

    return result.Result, result.SessID, nil
}

// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
    destinationPath := f.fromStandardPath(dirPath)
    encodedFileName := f.fromStandardPath(fileName)
    pr, pw := io.Pipe()
    writer := multipart.NewWriter(pw)
    isDeletionRequired := false
    go func() {
        defer func() {
            if err := pw.Close(); err != nil {
                fs.Logf(nil, "Failed to close pipe writer: %v", err)
            }
        }()
        _ = writer.WriteField("sess_id", sessID)
        _ = writer.WriteField("utype", "prem")
        _ = writer.WriteField("fld_path", destinationPath)

        part, err := writer.CreateFormFile("file_0", encodedFileName)
        if err != nil {
            pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
            return
        }

        if _, err := io.Copy(part, fileContent); err != nil {
            isDeletionRequired = true
            pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
            return
        }

        if err := writer.Close(); err != nil {
            pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
        }
    }()

    var fileCode string
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
        if err != nil {
            return false, fmt.Errorf("failed to create upload request: %w", err)
        }
        req.Header.Set("Content-Type", writer.FormDataContentType())

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
        }
        defer respBodyClose(resp.Body)

        var result []struct {
            FileCode   string `json:"file_code"`
            FileStatus string `json:"file_status"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("failed to parse upload response: %w", err)
        }

        if len(result) == 0 {
            return false, errors.New("upload failed: empty response")
        }
        if result[0].FileStatus != "OK" {
            return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
        }

        fileCode = result[0].FileCode
        return shouldRetryHTTP(resp.StatusCode), nil
    })

    if err != nil && isDeletionRequired {
        // Attempt to delete the file if upload fails
        _ = f.deleteFile(ctx, destinationPath+"/"+fileName)
    }

    return fileCode, err
}

// respBodyClose closes the response body and logs any error.
func respBodyClose(responseBody io.Closer) {
    if cerr := responseBody.Close(); cerr != nil {
        fs.Logf(nil, "Failed to close response body: %v", cerr)
    }
}
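The io.Pipe construction in uploadFileWithDestination above streams the multipart body instead of buffering the whole file in memory: one goroutine writes the form fields and file bytes into the pipe while the HTTP client reads from the other end. A self-contained sketch of the same pattern (the httptest server stands in for the upload endpoint):

package main

import (
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "net/http/httptest"
    "strings"
)

func main() {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _ = r.ParseMultipartForm(1 << 20)
        fmt.Fprint(w, "got sess_id=", r.FormValue("sess_id"))
    }))
    defer srv.Close()

    pr, pw := io.Pipe()
    mw := multipart.NewWriter(pw)
    go func() {
        _ = mw.WriteField("sess_id", "demo")
        part, _ := mw.CreateFormFile("file_0", "hello.txt")
        _, _ = io.Copy(part, strings.NewReader("hello world"))
        pw.CloseWithError(mw.Close()) // propagate any close error to the reader
    }()

    resp, err := http.Post(srv.URL, mw.FormDataContentType(), pr)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body)) // got sess_id=demo
}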
112 backend/filelu/filelu_helper.go Normal file
@@ -0,0 +1,112 @@
package filelu

import (
    "context"
    "errors"
    "fmt"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
    // Prepare parent directory
    parentDir := path.Dir(filePath)

    // Call List to get all the files
    result, err := f.getFolderList(ctx, parentDir)
    if err != nil {
        return "", err
    }

    for _, file := range result.Result.Files {
        filePathFromServer := parentDir + "/" + file.Name
        if parentDir == "/" {
            filePathFromServer = "/" + file.Name
        }
        if filePath == filePathFromServer {
            return file.FileCode, nil
        }
    }

    return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

func (f *Fs) fromStandardPath(remote string) string {
    return f.opt.Enc.FromStandardPath(remote)
}

func (f *Fs) toStandardPath(remote string) string {
    return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
    return hash.NewHashSet()
}

// Name returns the remote name
func (f *Fs) Name() string {
    return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
    return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

func (f *Fs) String() string {
    return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
    if len(s) != 12 {
        return false
    }
    for _, c := range s {
        if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
            return false
        }
    }
    return true
}

func shouldRetry(err error) bool {
    return fserrors.ShouldRetry(err)
}

func shouldRetryHTTP(code int) bool {
    return code == 429 || code >= 500
}

func rootSplit(absPath string) (bucket, bucketPath string) {
    // No bucket
    if absPath == "" {
        return "", ""
    }
    slash := strings.IndexRune(absPath, '/')
    // Bucket but no path
    if slash < 0 {
        return absPath, ""
    }
    return absPath[:slash], absPath[slash+1:]
}
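Quick standalone checks of two helpers above (copies, since the originals are unexported; the sample file code is invented):

package main

import "fmt"

func isFileCode(s string) bool {
    if len(s) != 12 {
        return false
    }
    for _, c := range s {
        if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
            return false
        }
    }
    return true
}

func shouldRetryHTTP(code int) bool {
    return code == 429 || code >= 500
}

func main() {
    fmt.Println(isFileCode("abc123def456")) // true: exactly 12 lowercase alphanumerics
    fmt.Println(isFileCode("ABC123DEF456")) // false: uppercase rejected
    fmt.Println(shouldRetryHTTP(429), shouldRetryHTTP(404), shouldRetryHTTP(503)) // true false true
}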
259 backend/filelu/filelu_object.go Normal file
@@ -0,0 +1,259 @@
package filelu

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
    fs      *Fs
    remote  string
    size    int64
    modTime time.Time
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    filePath := path.Join(f.root, remote)
    filePath = "/" + strings.Trim(filePath, "/")

    // Get File code
    fileCode, err := f.getFileCode(ctx, filePath)
    if err != nil {
        return nil, fs.ErrorObjectNotFound
    }

    // Get File info
    fileInfos, err := f.getFileInfo(ctx, fileCode)
    if err != nil {
        return nil, fmt.Errorf("failed to get file info: %w", err)
    }

    fileInfo := fileInfos.Result[0]
    size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)

    return &Object{
        fs:      f,
        remote:  remote,
        size:    size,
        modTime: time.Now(),
    }, nil
}

// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    filePath := path.Join(o.fs.root, o.remote)
    // Get direct link
    directLink, size, err := o.fs.getDirectLink(ctx, filePath)
    if err != nil {
        return nil, fmt.Errorf("failed to get direct link: %w", err)
    }

    o.size = size

    // Offset and Count for range download
    var offset int64
    var count int64
    fs.FixRangeOption(options, o.size)
    for _, option := range options {
        switch x := option.(type) {
        case *fs.RangeOption:
            offset, count = x.Decode(o.size)
            if count < 0 {
                count = o.size - offset
            }
        case *fs.SeekOption:
            offset = x.Offset
            count = o.size
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
        }
    }

    var reader io.ReadCloser
    err = o.fs.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create download request: %w", err)
        }

        resp, err := o.fs.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
        }

        if resp.StatusCode != http.StatusOK {
            defer func() {
                if err := resp.Body.Close(); err != nil {
                    fs.Logf(nil, "Failed to close response body: %v", err)
                }
            }()
            return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
        }

        // Wrap the response body to handle offset and count
        currentContents, err := io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("failed to read response body: %w", err)
        }

        if offset > 0 {
            if offset > int64(len(currentContents)) {
                return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
            }
            currentContents = currentContents[offset:]
        }
        if count > 0 && count < int64(len(currentContents)) {
            currentContents = currentContents[:count]
        }
        reader = io.NopCloser(bytes.NewReader(currentContents))

        return false, nil
    })
    if err != nil {
        return nil, err
    }

    return reader, nil
}

// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    if src.Size() <= 0 {
        return fs.ErrorCantUploadEmptyFiles
    }

    err := o.fs.uploadFile(ctx, in, o.remote)
    if err != nil {
        return fmt.Errorf("failed to upload file: %w", err)
    }
    o.size = src.Size()
    return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
    fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")

    err := o.fs.deleteFile(ctx, fullPath)
    if err != nil {
        return err
    }
    fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
    return nil
}

// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }

    var fileCode string
    if isFileCode(o.fs.root) {
        fileCode = o.fs.root
    } else {
        matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
        for _, match := range matches {
            if len(match) > 1 && len(match[1]) == 12 {
                fileCode = match[1]
                break
            }
        }
    }
    if fileCode == "" {
        return "", fmt.Errorf("no valid file code found in the remote path")
    }

    apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
        o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))

    var result struct {
        Status int    `json:"status"`
        Msg    string `json:"msg"`
        Result []struct {
            Hash string `json:"hash"`
        } `json:"result"`
    }
    err := o.fs.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, err
        }
        resp, err := o.fs.client.Do(req)
        if err != nil {
            return shouldRetry(err), err
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, err
        }
        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return "", err
    }
    if result.Status != 200 || len(result.Result) == 0 {
        return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
    }

    return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string {
    return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of the object
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
    return true
}
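Note that Open above downloads the whole body and slices it in memory rather than sending an HTTP Range header. The offset/count arithmetic in isolation (a standalone sketch mirroring the code above):

package main

import (
    "bytes"
    "fmt"
    "io"
)

// slice reproduces the trimming Open applies after io.ReadAll.
func slice(contents []byte, offset, count int64) (io.Reader, error) {
    if offset > int64(len(contents)) {
        return nil, fmt.Errorf("offset %d exceeds file size %d", offset, len(contents))
    }
    contents = contents[offset:]
    if count > 0 && count < int64(len(contents)) {
        contents = contents[:count]
    }
    return bytes.NewReader(contents), nil
}

func main() {
    r, err := slice([]byte("hello world"), 6, 5)
    if err != nil {
        panic(err)
    }
    out, _ := io.ReadAll(r)
    fmt.Println(string(out)) // world
}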
16 backend/filelu/filelu_test.go Normal file
@@ -0,0 +1,16 @@
package filelu_test

import (
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName:      "TestFileLu:",
        NilObject:       nil,
        SkipInvalidUTF8: true,
    })
}
15 backend/filelu/utils.go Normal file
@@ -0,0 +1,15 @@
package filelu

import (
    "fmt"
)

// parseStorageToBytes converts a storage string in GB (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
    var gb float64
    _, err := fmt.Sscanf(storage, "%f", &gb)
    if err != nil {
        return 0, fmt.Errorf("failed to parse storage: %w", err)
    }
    return int64(gb * 1024 * 1024 * 1024), nil
}
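The string is a GB count, so "10" becomes 10 GiB in bytes. A standalone copy showing the conversion:

package main

import "fmt"

func parseStorageToBytes(storage string) (int64, error) {
    var gb float64
    if _, err := fmt.Sscanf(storage, "%f", &gb); err != nil {
        return 0, fmt.Errorf("failed to parse storage: %w", err)
    }
    return int64(gb * 1024 * 1024 * 1024), nil
}

func main() {
    n, err := parseStorageToBytes("10")
    fmt.Println(n, err) // 10737418240 <nil>
}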
@@ -9,6 +9,7 @@ import (
 	"io"
 	"net"
 	"net/textproto"
+	"net/url"
 	"path"
 	"runtime"
 	"strings"
@@ -162,6 +163,16 @@ Enabled by default. Use 0 to disable.`,
 	Help:     "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
 	Default:  false,
 	Advanced: true,
+}, {
+	Name: "allow_insecure_tls_ciphers",
+	Help: `Allow insecure TLS ciphers
+
+Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
+
+- TLS_RSA_WITH_AES_128_GCM_SHA256
+`,
+	Default:  false,
+	Advanced: true,
}, {
 	Name: "shut_timeout",
 	Help: "Maximum time to wait for data connection closing status.",
@@ -185,6 +196,14 @@ Supports the format user:pass@host:port, user@host:port, host:port.
Example:

    myUser:myPass@localhost:9005
`,
 	Advanced: true,
+}, {
+	Name:    "http_proxy",
+	Default: "",
+	Help: `URL for HTTP CONNECT proxy
+
+Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
+`,
+	Advanced: true,
}, {
@@ -235,6 +254,7 @@ type Options struct {
 	ExplicitTLS             bool `config:"explicit_tls"`
 	TLSCacheSize            int  `config:"tls_cache_size"`
 	DisableTLS13            bool `config:"disable_tls13"`
+	AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"`
 	Concurrency             int  `config:"concurrency"`
 	SkipVerifyTLSCert       bool `config:"no_check_certificate"`
 	DisableEPSV             bool `config:"disable_epsv"`
@@ -248,6 +268,7 @@ type Options struct {
 	AskPassword   bool                 `config:"ask_password"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
 	SocksProxy    string               `config:"socks_proxy"`
+	HTTPProxy     string               `config:"http_proxy"`
 	NoCheckUpload bool                 `config:"no_check_upload"`
}
@@ -266,6 +287,7 @@ type Fs struct {
 	pool     []*ftp.ServerConn
 	drain    *time.Timer // used to drain the pool when we stop using the connections
 	tokens   *pacer.TokenDispenser
+	proxyURL *url.URL // address of HTTP proxy read from environment
 	pacer    *fs.Pacer // pacer for FTP connections
 	fGetTime bool      // true if the ftp library accepts GetTime
 	fSetTime bool      // true if the ftp library accepts SetTime
@@ -396,6 +418,14 @@ func (f *Fs) tlsConfig() *tls.Config {
 		if f.opt.DisableTLS13 {
 			tlsConfig.MaxVersion = tls.VersionTLS12
 		}
+		if f.opt.AllowInsecureTLSCiphers {
+			var ids []uint16
+			// Read default ciphers
+			for _, cs := range tls.CipherSuites() {
+				ids = append(ids, cs.ID)
+			}
+			tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
+		}
 	}
 	return tlsConfig
}
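The cipher handling above starts from Go's curated secure defaults and appends exactly one legacy suite. A standalone sketch of that construction:

package main

import (
    "crypto/tls"
    "fmt"
)

func main() {
    var ids []uint16
    for _, cs := range tls.CipherSuites() { // secure, non-deprecated suites only
        ids = append(ids, cs.ID)
    }
    // The single insecure suite the new option opts back in.
    ids = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
    cfg := &tls.Config{CipherSuites: ids}
    fmt.Println("suites enabled:", len(cfg.CipherSuites))
}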
@@ -413,11 +443,26 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	dial := func(network, address string) (conn net.Conn, err error) {
 		fs.Debugf(f, "dial(%q,%q)", network, address)
 		defer func() {
-			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
+			if err != nil {
+				fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
+			} else {
+				fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
+			}
 		}()
 		baseDialer := fshttp.NewDialer(ctx)
 		if f.opt.SocksProxy != "" {
 			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
+		} else if f.proxyURL != nil {
+			// We need to make the onward connection to f.opt.Host. However the FTP
+			// library sets the host to the proxy IP after using EPSV or PASV so we need
+			// to correct that here.
+			var dialPort string
+			_, dialPort, err = net.SplitHostPort(address)
+			if err != nil {
+				return nil, err
+			}
+			dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
+			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
 		} else {
 			conn, err = baseDialer.Dial(network, address)
 		}
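The HTTP CONNECT branch above has to undo the FTP library's address substitution: after PASV/EPSV the library hands back the proxy-reported IP, so the dialer swaps the host back to the configured FTP host while keeping the port. That rewrite in isolation (addresses invented):

package main

import (
    "fmt"
    "net"
)

func rewrite(address, ftpHost string) (string, error) {
    _, port, err := net.SplitHostPort(address)
    if err != nil {
        return "", err
    }
    return net.JoinHostPort(ftpHost, port), nil
}

func main() {
    out, err := rewrite("10.0.0.5:21012", "ftp.example.com")
    fmt.Println(out, err) // ftp.example.com:21012 <nil>
}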
@@ -631,6 +676,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		CanHaveEmptyDirectories: true,
 		PartialUploads:          true,
 	}).Fill(ctx, f)
+	// get proxy URL if set
+	if opt.HTTPProxy != "" {
+		proxyURL, err := url.Parse(opt.HTTPProxy)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+		}
+		f.proxyURL = proxyURL
+	}
 	// set the pool drainer timer going
 	if f.opt.IdleTimeout > 0 {
 		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
 		ci.Timeout = saveTimeout
 	}()
 	ci.LowLevelRetries = 1
-	ci.Timeout = idleTimeout
+	ci.Timeout = fs.Duration(idleTimeout)

 	upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
 		fixFs := deriveFs(ctx, t, f, settings{
@@ -194,33 +194,9 @@ type DeleteResponse struct {
 	Data map[string]Error
}

-// Server is an upload server
-type Server struct {
-	Name string `json:"name"`
-	Zone string `json:"zone"`
-}
-
-// String returns a string representation of the Server
-func (s *Server) String() string {
-	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
-}
-
-// Root returns the root URL for the server
-func (s *Server) Root() string {
-	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
-}
-
-// URL returns the upload URL for the server
-func (s *Server) URL() string {
-	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
-}
-
-// ServersResponse is the output from /servers
-type ServersResponse struct {
-	Error
-	Data struct {
-		Servers []Server `json:"servers"`
-	} `json:"data"`
-}
+// DirectUploadURL returns the direct upload URL for Gofile
+func DirectUploadURL() string {
+	return "https://upload.gofile.io/uploadfile"
+}

// UploadResponse is returned by POST /contents/uploadfile
@@ -8,13 +8,11 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math/rand"
 	"net/http"
 	"net/url"
 	"path"
 	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/rclone/rclone/backend/gofile/api"
@@ -37,8 +35,6 @@ const (
 	maxSleep       = 20 * time.Second
 	decayConstant  = 1 // bigger for slower decay, exponential
 	rootURL        = "https://api.gofile.io"
-	serversExpiry  = 60 * time.Second // check for new upload servers this often
-	serversActive  = 2                // choose this many closest upload servers to use
 	rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
 	maxDepth       = 4               // in ListR recursive list this deep (maximum is 16)
)
@@ -135,9 +131,6 @@ type Fs struct {
 	srv      *rest.Client       // the connection to the server
 	dirCache *dircache.DirCache // Map of directory path to directory id
 	pacer    *fs.Pacer          // pacer for API calls
-	serversMu      *sync.Mutex  // protect the servers info below
-	servers        []api.Server // upload servers we can use
-	serversChecked time.Time    // time the servers were refreshed
}

// Object describes a gofile object
@@ -316,7 +309,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:   *opt,
 		srv:   rest.NewClient(client).SetRoot(rootURL),
 		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		serversMu: new(sync.Mutex),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: false,
@@ -435,98 +427,6 @@ func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err erro
 	return nil
}

-// Find the top n servers measured by response time
-func (f *Fs) bestServers(ctx context.Context, servers []api.Server, n int) (newServers []api.Server) {
-	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second))
-	defer cancel()
-
-	if n > len(servers) {
-		n = len(servers)
-	}
-	results := make(chan int, len(servers))
-
-	// Test how long the servers take to respond
-	for i := range servers {
-		i := i // for closure
-		go func() {
-			opts := rest.Opts{
-				Method:  "GET",
-				RootURL: servers[i].Root(),
-			}
-			var result api.UploadServerStatus
-			start := time.Now()
-			_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
-			ping := time.Since(start)
-			err = result.Err(err)
-			if err != nil {
-				results <- -1 // send a -ve number on error
-				return
-			}
-			fs.Debugf(nil, "Upload server %v responded in %v", &servers[i], ping)
-			results <- i
-		}()
-	}
-
-	// Wait for n servers to respond
-	newServers = make([]api.Server, 0, n)
-	for range servers {
-		i := <-results
-		if i >= 0 {
-			newServers = append(newServers, servers[i])
-		}
-		if len(newServers) >= n {
-			break
-		}
-	}
-	return newServers
-}
-
-// Clear all the upload servers - call on an error
-func (f *Fs) clearServers() {
-	f.serversMu.Lock()
-	defer f.serversMu.Unlock()
-
-	fs.Debugf(f, "Clearing upload servers")
-	f.servers = nil
-}
-
-// Gets an upload server
-func (f *Fs) getServer(ctx context.Context) (server *api.Server, err error) {
-	f.serversMu.Lock()
-	defer f.serversMu.Unlock()
-
-	if len(f.servers) == 0 || time.Since(f.serversChecked) >= serversExpiry {
-		opts := rest.Opts{
-			Method: "GET",
-			Path:   "/servers",
-		}
-		var result api.ServersResponse
-		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
-		})
-		if err = result.Err(err); err != nil {
-			if len(f.servers) == 0 {
-				return nil, fmt.Errorf("failed to read upload servers: %w", err)
-			}
-			fs.Errorf(f, "failed to read new upload servers: %v", err)
-		} else {
-			// Find the top servers measured by response time
-			f.servers = f.bestServers(ctx, result.Data.Servers, serversActive)
-			f.serversChecked = time.Now()
-		}
-	}
-
-	if len(f.servers) == 0 {
-		return nil, errors.New("no upload servers found")
-	}
-
-	// Pick a server at random since we've already found the top ones
-	i := rand.Intn(len(f.servers))
-	return &f.servers[i], nil
-}
-
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
 	if f.root == "" {
@@ -1526,13 +1426,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}

-	// Find an upload server
-	server, err := o.fs.getServer(ctx)
-	if err != nil {
-		return err
-	}
-	fs.Debugf(o, "Using upload server %v", server)
-
 	// If the file exists, delete it after a successful upload
 	if o.id != "" {
 		id := o.id
@@ -1561,7 +1454,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		},
 		MultipartContentName: "file",
 		MultipartFileName:    o.fs.opt.Enc.FromStandardName(leaf),
-		RootURL:              server.URL(),
+		RootURL:              api.DirectUploadURL(),
 		Options:              options,
 	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1569,10 +1462,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return shouldRetry(ctx, resp, err)
 	})
 	if err = result.Err(err); err != nil {
-		if isAPIErr(err, "error-freespace") {
-			fs.Errorf(o, "Upload server out of space - need to retry upload")
-		}
-		o.fs.clearServers()
 		return fmt.Errorf("failed to upload file: %w", err)
 	}
 	return o.setMetaData(&result.Data)
@@ -483,6 +483,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+	if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
+		bucketPath = bucketPath[:len(bucketPath)-1]
+	}
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -712,7 +715,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			continue
 		}
 		// process directory markers as directories
-		remote = strings.TrimRight(remote, "/")
+		remote, _ = strings.CutSuffix(remote, "/")
 	}
 	remote = remote[len(prefix):]
 	if addBucket {
@@ -959,7 +962,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
-	remote = strings.TrimRight(remote, "/")
+	remote, _ = strings.CutSuffix(remote, "/")
 	dir := path.Dir(remote)
 	if dir == "/" || dir == "." {
 		dir = ""
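The TrimRight-to-CutSuffix swap matters for directory markers: TrimRight strips every trailing slash, while CutSuffix removes at most one, so a marker path ending in "//" keeps its inner slash. A quick demonstration (strings.CutSuffix needs Go 1.20+):

package main

import (
    "fmt"
    "strings"
)

func main() {
    fmt.Println(strings.TrimRight("a//", "/")) // a
    s, _ := strings.CutSuffix("a//", "/")
    fmt.Println(s) // a/
}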
@@ -43,6 +43,7 @@ var (
 	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
 	errRemove      = errors.New("google photos API only implements removing files from albums")
 	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
+	errReadOnly    = errors.New("can't upload files in read only mode")
)

const (
@@ -52,19 +53,31 @@ const (
 	listChunks  = 100 // chunk size to read directory listings
 	albumChunks = 50  // chunk size to read album listings
 	minSleep    = 10 * time.Millisecond
-	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
-	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
-	scopeAccess    = 2 // position of access scope in list
+	scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
+	scopeReadOnly   = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
+	scopeReadWrite  = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
)

var (
-	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
-		Scopes: []string{
+	// scopes needed for read write access
+	scopesReadWrite = []string{
 		"openid",
 		"profile",
-		scopeReadWrite, // this must be at position scopeAccess
-	},
+		scopeAppendOnly,
+		scopeReadOnly,
+		scopeReadWrite,
+	}
+
+	// scopes needed for read only access
+	scopesReadOnly = []string{
+		"openid",
+		"profile",
+		scopeReadOnly,
+	}
+
+	// Description of how to auth for this app
+	oauthConfig = &oauthutil.Config{
+		Scopes:   scopesReadWrite,
 		AuthURL:  google.Endpoint.AuthURL,
 		TokenURL: google.Endpoint.TokenURL,
 		ClientID: rcloneClientID,
@@ -100,20 +113,26 @@ func init() {
|
||||
case "":
|
||||
// Fill in the scopes
|
||||
if opt.ReadOnly {
|
||||
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
|
||||
oauthConfig.Scopes = scopesReadOnly
|
||||
} else {
|
||||
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
|
||||
oauthConfig.Scopes = scopesReadWrite
|
||||
}
|
||||
return oauthutil.ConfigOut("warning", &oauthutil.Options{
|
||||
return oauthutil.ConfigOut("warning1", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
case "warning":
|
||||
case "warning1":
|
||||
// Warn the user as required by google photos integration
|
||||
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
|
||||
return fs.ConfigConfirm("warning2", true, "config_warning", `Warning
|
||||
|
||||
IMPORTANT: All media items uploaded to Google Photos with rclone
|
||||
are stored in full resolution at original quality. These uploads
|
||||
will count towards storage in your Google Account.`)
|
||||
|
||||
case "warning2":
|
||||
// Warn the user that rclone can no longer download photos it didnt upload from google photos
|
||||
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
|
||||
IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
|
||||
|
||||
case "warning_done":
|
||||
return nil, nil
|
||||
}
|
||||
@@ -333,7 +352,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Box: %w", err)
|
||||
return nil, fmt.Errorf("failed to configure google photos: %w", err)
|
||||
}
|
||||
|
||||
root = strings.Trim(path.Clean(root), "/")
|
||||
@@ -1120,6 +1139,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
if !album.IsWriteable {
|
||||
if o.fs.opt.ReadOnly {
|
||||
return errReadOnly
|
||||
}
|
||||
return errOwnAlbums
|
||||
}
|
||||
|
||||
|
@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Used),
Free: fs.NewUsageValue(info.Remaining),
}, nil
}

@@ -252,18 +252,14 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
}

resp, err := d.icloud.srv.Call(ctx, opts)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
if err != nil && resp != nil && resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}

return resp, err
}
return d.icloud.srv.Call(ctx, opts)
}

// MoveItemToTrashByItemID moves an item to the trash based on the item ID.

@@ -421,6 +421,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
return uploadFile(ctx, f, in, src.Remote(), options...)
}

@@ -659,6 +662,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if src.Size() == 0 {
return fs.ErrorCantUploadEmptyFiles
}

srcRemote := o.Remote()

@@ -670,7 +676,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var resp *client.UploadResult

err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var res *http.Response
res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
FileName: fileName,
@@ -725,7 +731,7 @@ func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, opti
UseUniqueFileName := new(bool)
*UseUniqueFileName = false

err := f.pacer.Call(func() (bool, error) {
err := f.pacer.CallNoRetry(func() (bool, error) {
var res *http.Response
var err error
res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
@@ -794,35 +800,10 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
return metadata, nil
}

// Copy src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}

file, err := srcObj.Open(ctx)

if err != nil {
return nil, err
}

return uploadFile(ctx, f, file, remote)
}

// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.Copier = &Fs{}
)

@@ -1339,7 +1339,7 @@ func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.PathEscape(v))
newValues = append(newValues, url.QueryEscape(v))
}
return strings.Join(newValues, "/")
}

@@ -461,7 +461,7 @@ func translateErrorsDir(err error) error {
return err
}

// translatesErrorsObject translates Koofr errors to rclone errors (for an object operation)
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:

@@ -617,16 +617,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
case 1:
// upload file using link from first step
var res *http.Response
var location string

// Check to see if we are being redirected
opts := &rest.Opts{
Method: "HEAD",
RootURL: getFirstStepResult.Data.SignURL,
Options: options,
NoRedirect: true,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, opts)
return o.fs.shouldRetry(ctx, res, err)
})
if res != nil {
location = res.Header.Get("Location")
if location != "" {
// set the URL to the new Location
opts.RootURL = location
err = nil
}
}
if err != nil {
return fmt.Errorf("head upload URL: %w", err)
}

file := io.MultiReader(bytes.NewReader(first10mBytes), in)

opts := &rest.Opts{
Method: "PUT",
RootURL: getFirstStepResult.Data.SignURL,
Options: options,
Body: file,
ContentLength: &size,
}
opts.Method = "PUT"
opts.Body = file
opts.ContentLength = &size

err = o.fs.pacer.CallNoRetry(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, opts)

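The HEAD probe above is a common trick: ask the server for the redirect target without following it, then point the PUT at the final URL so the one-shot upload body is not consumed by a redirected request. A standalone sketch of the same technique in plain net/http (function and variable names here are illustrative, not from the diff):

package main

import (
	"fmt"
	"net/http"
)

// resolveUploadURL issues a HEAD request that does not follow redirects and
// returns the Location target if the server redirects, else the original URL.
func resolveUploadURL(signURL string) (string, error) {
	client := &http.Client{
		// Stop at the first response instead of following redirects.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	res, err := client.Head(signURL)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if loc := res.Header.Get("Location"); loc != "" {
		return loc, nil
	}
	return signURL, nil
}

func main() {
	url, err := resolveUploadURL("https://example.com/signed-upload")
	fmt.Println(url, err)
}
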
@@ -1,4 +1,4 @@
//go:build windows || plan9 || js || linux
//go:build windows || plan9 || js || wasm || linux

package local


@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js && !linux
//go:build !windows && !plan9 && !js && !wasm && !linux

package local


@@ -1,4 +1,4 @@
//go:build plan9 || js
//go:build plan9 || js || wasm

package local


@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js
//go:build !windows && !plan9 && !js && !wasm

package local


@@ -305,6 +305,12 @@ only useful for reading.
Help: "The last status change time.",
}},
},
{
Name: "hashes",
Help: `Comma separated list of supported checksum types.`,
Default: fs.CommaSepList{},
Advanced: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -331,6 +337,7 @@ type Options struct {
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
TimeType timeType `config:"time_type"`
Hashes fs.CommaSepList `config:"hashes"`
Enc encoder.MultiEncoder `config:"encoding"`
NoClone bool `config:"no_clone"`
}
@@ -664,8 +671,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
@@ -687,13 +698,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
newRemote += fs.LinkSuffix
}
// Don't include non directory if not included
@@ -1021,6 +1032,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if len(f.opt.Hashes) > 0 {
// Return only configured hashes.
// Note: Could have used hash.SupportOnly to limit supported hashes for all hash related features.
var supported hash.Set
for _, hashName := range f.opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String())
}
supported.Add(ht)
}
return supported
}
return hash.Supported()
}

@@ -1090,6 +1114,10 @@ func (o *Object) Remote() string {

// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if r == hash.None {
return "", nil
}

// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime
@@ -1197,7 +1225,15 @@ func (o *Object) Storable() bool {
o.fs.objectMetaMu.RLock()
mode := o.mode
o.fs.objectMetaMu.RUnlock()
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {

// On Windows items with os.ModeIrregular are likely Junction
// points so we treat them as symlinks for the purpose of ignoring them.
// https://github.com/golang/go/issues/73827
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
if mode&symlinkFlag != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}

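The symlinkFlag changes above matter because Go reports Windows junction points with os.ModeIrregular rather than os.ModeSymlink (see the golang issue linked in the diff), so checking os.ModeSymlink alone misses them. A minimal sketch of the combined check outside rclone (the path is hypothetical):

fi, err := os.Lstat(`C:\data\junction`) // hypothetical junction point
if err == nil {
	linkLike := fi.Mode()&(os.ModeSymlink|os.ModeIrregular) != 0
	fmt.Println("treat as symlink:", linkLike)
}
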
@@ -204,6 +204,23 @@ func TestSymlinkError(t *testing.T) {
assert.Equal(t, errLinksAndCopyLinks, err)
}

func TestHashWithTypeNone(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
const filePath = "file.txt"
r.WriteFile(filePath, "content", time.Now())
f := r.Flocal.(*Fs)

// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)

// Test the hash is as we expect
h, err := o.Hash(ctx, hash.None)
require.Empty(t, h)
require.NoError(t, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()

@@ -1,4 +1,4 @@
//go:build dragonfly || plan9 || js
//go:build dragonfly || plan9 || js || wasm

package local


@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js
//go:build !windows && !plan9 && !js && !wasm

package local


@@ -1,4 +1,4 @@
//go:build windows || plan9 || js
//go:build windows || plan9 || js || wasm

package local


@@ -634,7 +634,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
return
}

// itemToEntry converts API item to rclone directory entry
// itemToDirEntry converts API item to rclone directory entry
// The dirSize return value is:
//
// <0 - for a file or in case of error

@@ -17,9 +17,11 @@ Improvements:

import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
"path"
"slices"
"strings"
@@ -216,7 +218,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
defer megaCacheMu.Unlock()
srv := megaCache[opt.User]
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
// srv = mega.New().SetClient(fshttp.NewClient(ctx))

// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
// Relevant issues:
// https://github.com/rclone/rclone/issues/8565
// https://github.com/meganz/webclient/issues/103
clt := fshttp.NewClient(ctx)
clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
})
srv = mega.New().SetClient(clt)

srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...any) {
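To see what the workaround above buys, you can dial a storage endpoint and print the cipher suite it negotiates. An illustrative standalone sketch (the hostname is the example from the comment above and may not remain valid; note that TLS 1.3 connections ignore the CipherSuites list):

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Default suites plus the legacy one Mega's storage servers use.
	var ids []uint16
	for _, cs := range tls.CipherSuites() {
		ids = append(ids, cs.ID)
	}
	ids = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)

	conn, err := tls.Dial("tcp", "gfs302n114.userstorage.mega.co.nz:443", &tls.Config{CipherSuites: ids})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println(tls.CipherSuiteName(conn.ConnectionState().CipherSuite))
}
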
@@ -926,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Cstrg), // bytes in use
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

@@ -749,6 +749,8 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,

// Fetch metadata and update updateInfo if --metadata is in use
// modtime will still be set when there is no metadata to set
//
// May return info=nil and err=nil if there was no metadata to update.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
@@ -768,6 +770,8 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
}

// updateMetadata calls Get, Set, and Write
//
// May return info=nil and err=nil if there was no metadata to update.
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
_, err = o.meta.Get(ctx) // refresh permissions
if err != nil {

@@ -56,6 +56,7 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
maxSinglePartSize = 4 * fs.Mebi

regionGlobal = "global"
regionUS = "us"
@@ -138,6 +139,21 @@ func init() {
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.

This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.

See: https://github.com/rclone/rclone/issues/1716
`,
Default: fs.SizeSuffix(-1),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -746,6 +762,7 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
@@ -1022,6 +1039,13 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxSinglePartSize {
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
}
return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1035,6 +1059,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
}

if opt.DriveID == "" || opt.DriveType == "" {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -1754,7 +1782,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
if err != nil {
return nil, err
}
if info != nil {
err = dstObj.setMetaData(info)
}
return dstObj, err
}

@@ -1834,7 +1864,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
if info != nil {
err = dstObj.setMetaData(info)
}
return dstObj, err
}

@@ -2469,6 +2501,10 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return false, nil
}
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
time.Sleep(5 * time.Second) // a little delay to help things along
return true, err
}
if err != nil {
return shouldRetry(ctx, resp, err)
@@ -2563,8 +2599,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
if size < 0 || size > int64(maxSinglePartSize) {
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
}

fs.Debugf(o, "Starting singlepart upload")
@@ -2597,7 +2633,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
if err != nil {
return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
}
return info, o.setMetaData(info)
if info != nil {
err = o.setMetaData(info)
}
return info, err
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -2617,9 +2656,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()

var info *api.Item
if size > 0 {
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size == 0 {
} else if size >= 0 {
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")

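Putting the pieces together, the new Update logic selects an upload strategy from the file size and the new upload_cutoff option (default -1, i.e. disabled, which keeps multipart for everything with a known positive size). A condensed sketch of the resulting decision using the diff's own names:

switch {
case size > 0 && size >= int64(o.fs.opt.UploadCutoff):
	info, err = o.uploadMultipart(ctx, in, src, options...)
case size >= 0: // empty files and files below the cutoff (the cutoff is capped at 4 MiB)
	info, err = o.uploadSinglepart(ctx, in, src, options...)
default: // unknown size
	return errors.New("unknown-sized upload not supported")
}
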
@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage
@@ -12,6 +12,7 @@ import (
"strings"
"time"

"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
@@ -33,9 +34,46 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as opc-meta- keys.`,
},
})
}

var systemMetadataInfo = map[string]fs.MetadataHelp{
"opc-meta-mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"opc-meta-uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-atime": {
Help: "Time of last access",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-mtime": {
Help: "Time of last modification",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-btime": {
Help: "Time of file birth (creation)",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
}

// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
@@ -82,6 +120,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMetadata: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
@@ -688,6 +727,38 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return list.Flush()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx)
if err != nil {
return nil, err
}
metadata = make(fs.Metadata, len(o.meta)+7)
for k, v := range o.meta {
switch k {
case metaMtime:
if modTime, err := swift.FloatStringToTime(v); err == nil {
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
}
case metaMD5Hash:
// don't write hash metadata
default:
metadata[k] = v
}
}
if o.mimeType != "" {
metadata["content-type"] = o.mimeType
}

if !o.lastModified.IsZero() {
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
}

return metadata, nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -1,7 +1,7 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js
//go:build plan9 || solaris || js || wasm

// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js
//go:build !plan9 && !solaris && !js && !wasm

package oracleobjectstorage


@@ -378,12 +378,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, nil
}

// OpenWriterAt opens with a handle for random access writes
// XOpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// It truncates any existing object.
//
// OpenWriterAt disabled because it seems to have been disabled at pcloud
// PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1
//
// {
// "result": 2003,
// "error": "Access denied. You do not have permissions to perform this operation."
// }
func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
client, err := f.newSingleConnClient(ctx)
if err != nil {
return nil, fmt.Errorf("create client: %w", err)

@@ -155,6 +155,7 @@ func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && !info.Links.ApplicationOctetStream.Valid() {
time.Sleep(5 * time.Second)
return true, errors.New("no link")
}
return f.shouldRetry(ctx, resp, err)

backend/pikpak/multipart.go (new file, 333 lines)
@@ -0,0 +1,333 @@
package pikpak

import (
	"context"
	"fmt"
	"io"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/rclone/rclone/backend/pikpak/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunksize"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"golang.org/x/sync/errgroup"
)

const (
	bufferSize           = 1024 * 1024     // default size of the pages used in the reader
	bufferCacheSize      = 64              // max number of buffers to keep in cache
	bufferCacheFlushTime = 5 * time.Second // flush the cached buffers after this long
)

// bufferPool is a global pool of buffers
var (
	bufferPool     *pool.Pool
	bufferPoolOnce sync.Once
)

// get a buffer pool
func getPool() *pool.Pool {
	bufferPoolOnce.Do(func() {
		ci := fs.GetConfig(context.Background())
		// Initialise the buffer pool when used
		bufferPool = pool.New(bufferCacheFlushTime, bufferSize, bufferCacheSize, ci.UseMmap)
	})
	return bufferPool
}

// NewRW gets a pool.RW using the multipart pool
func NewRW() *pool.RW {
	return pool.NewRW(getPool())
}

// Upload does a multipart upload in parallel
func (w *pikpakChunkWriter) Upload(ctx context.Context) (err error) {
	// make concurrency machinery
	tokens := pacer.NewTokenDispenser(w.con)

	uploadCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	defer atexit.OnError(&err, func() {
		cancel()
		fs.Debugf(w.o, "multipart upload: Cancelling...")
		errCancel := w.Abort(ctx)
		if errCancel != nil {
			fs.Debugf(w.o, "multipart upload: failed to cancel: %v", errCancel)
		}
	})()

	var (
		g, gCtx   = errgroup.WithContext(uploadCtx)
		finished  = false
		off       int64
		size      = w.size
		chunkSize = w.chunkSize
	)

	// Do the accounting manually
	in, acc := accounting.UnWrapAccounting(w.in)

	for partNum := int64(0); !finished; partNum++ {
		// Get a block of memory from the pool and token which limits concurrency.
		tokens.Get()
		rw := NewRW()
		if acc != nil {
			rw.SetAccounting(acc.AccountRead)
		}

		free := func() {
			// return the memory and token
			_ = rw.Close() // Can't return an error
			tokens.Put()
		}

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			free()
			break
		}

		// Read the chunk
		var n int64
		n, err = io.CopyN(rw, in, chunkSize)
		if err == io.EOF {
			if n == 0 && partNum != 0 { // end if no data and if not first chunk
				free()
				break
			}
			finished = true
		} else if err != nil {
			free()
			return fmt.Errorf("multipart upload: failed to read source: %w", err)
		}

		partNum := partNum
		partOff := off
		off += n
		g.Go(func() (err error) {
			defer free()
			fs.Debugf(w.o, "multipart upload: starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(partOff), fs.SizeSuffix(size))
			_, err = w.WriteChunk(gCtx, int32(partNum), rw)
			return err
		})
	}

	err = g.Wait()
	if err != nil {
		return err
	}

	err = w.Close(ctx)
	if err != nil {
		return fmt.Errorf("multipart upload: failed to finalise: %w", err)
	}

	return nil
}

var warnStreamUpload sync.Once

// state of ChunkWriter
type pikpakChunkWriter struct {
	chunkSize      int64
	size           int64
	con            int
	f              *Fs
	o              *Object
	in             io.Reader
	mu             sync.Mutex
	completedParts []types.CompletedPart
	client         *s3.Client
	mOut           *s3.CreateMultipartUploadOutput
}

func (f *Fs) newChunkWriter(ctx context.Context, remote string, size int64, p *api.ResumableParams, in io.Reader, options ...fs.OpenOption) (w *pikpakChunkWriter, err error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}

	// calculate size of parts
	chunkSize := f.opt.ChunkSize

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
	// 48 GiB which seems like a not too unreasonable limit.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(maxUploadParts)))
		})
	} else {
		chunkSize = chunksize.Calculator(o, size, maxUploadParts, chunkSize)
	}
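	// Added note (not in the original file): with the default 5 MiB chunk
	// size and maxUploadParts = 10,000, the streaming ceiling works out to
	// 5 MiB * 10,000 = 50,000 MiB ≈ 48.8 GiB, which is where the "48 GiB"
	// figure in the comment above comes from.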

	client, err := f.newS3Client(ctx, p)
	if err != nil {
		return nil, fmt.Errorf("failed to create upload client: %w", err)
	}
	w = &pikpakChunkWriter{
		chunkSize:      int64(chunkSize),
		size:           size,
		con:            max(1, f.opt.UploadConcurrency),
		f:              f,
		o:              o,
		in:             in,
		completedParts: make([]types.CompletedPart, 0),
		client:         client,
	}

	req := &s3.CreateMultipartUploadInput{
		Bucket: &p.Bucket,
		Key:    &p.Key,
	}
	// Apply upload options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "cache-control":
			req.CacheControl = aws.String(value)
		case "content-disposition":
			req.ContentDisposition = aws.String(value)
		case "content-encoding":
			req.ContentEncoding = aws.String(value)
		case "content-type":
			req.ContentType = aws.String(value)
		}
	}
	err = w.f.pacer.Call(func() (bool, error) {
		w.mOut, err = w.client.CreateMultipartUpload(ctx, req)
		return w.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("create multipart upload failed: %w", err)
	}
	fs.Debugf(w.o, "multipart upload: %q initiated", *w.mOut.UploadId)
	return
}

// shouldRetry returns a boolean as to whether this err
// deserve to be retried. It returns the err as a convenience
func (w *pikpakChunkWriter) shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if fserrors.ShouldRetry(err) {
		return true, err
	}
	return false, err
}

// add a part number and etag to the completed parts
func (w *pikpakChunkWriter) addCompletedPart(part types.CompletedPart) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.completedParts = append(w.completedParts, part)
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *pikpakChunkWriter) WriteChunk(ctx context.Context, chunkNumber int32, reader io.ReadSeeker) (currentChunkSize int64, err error) {
	if chunkNumber < 0 {
		err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
		return -1, err
	}

	partNumber := chunkNumber + 1
	var res *s3.UploadPartOutput
	err = w.f.pacer.Call(func() (bool, error) {
		// Discover the size by seeking to the end
		currentChunkSize, err = reader.Seek(0, io.SeekEnd)
		if err != nil {
			return false, err
		}
		// rewind the reader on retry and after reading md5
		_, err := reader.Seek(0, io.SeekStart)
		if err != nil {
			return false, err
		}
		res, err = w.client.UploadPart(ctx, &s3.UploadPartInput{
			Bucket:     w.mOut.Bucket,
			Key:        w.mOut.Key,
			UploadId:   w.mOut.UploadId,
			PartNumber: &partNumber,
			Body:       reader,
		})
		if err != nil {
			if chunkNumber <= 8 {
				return w.shouldRetry(ctx, err)
			}
			// retry all chunks once have done the first few
			return true, err
		}
		return false, nil
	})
	if err != nil {
		return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", partNumber, currentChunkSize, err)
	}

	w.addCompletedPart(types.CompletedPart{
		PartNumber: &partNumber,
		ETag:       res.ETag,
	})

	fs.Debugf(w.o, "multipart upload: wrote chunk %d with %v bytes", partNumber, currentChunkSize)
	return currentChunkSize, err
}

// Abort the multipart upload
func (w *pikpakChunkWriter) Abort(ctx context.Context) (err error) {
	// Abort the upload session
	err = w.f.pacer.Call(func() (bool, error) {
		_, err = w.client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
			Bucket:   w.mOut.Bucket,
			Key:      w.mOut.Key,
			UploadId: w.mOut.UploadId,
		})
		return w.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to abort multipart upload %q: %w", *w.mOut.UploadId, err)
	}
	fs.Debugf(w.o, "multipart upload: %q aborted", *w.mOut.UploadId)
	return
}

// Close and finalise the multipart upload
func (w *pikpakChunkWriter) Close(ctx context.Context) (err error) {
	// sort the completed parts by part number
	sort.Slice(w.completedParts, func(i, j int) bool {
		return *w.completedParts[i].PartNumber < *w.completedParts[j].PartNumber
	})
	// Finalise the upload session
	err = w.f.pacer.Call(func() (bool, error) {
		_, err = w.client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
			Bucket:   w.mOut.Bucket,
			Key:      w.mOut.Key,
			UploadId: w.mOut.UploadId,
			MultipartUpload: &types.CompletedMultipartUpload{
				Parts: w.completedParts,
			},
		})
		return w.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to complete multipart upload: %w", err)
	}
	fs.Debugf(w.o, "multipart upload: %q finished", *w.mOut.UploadId)
	return
}
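For orientation, the new chunk writer is driven in two calls. A hypothetical usage sketch using the names defined in the file above (error handling condensed; Upload aborts the multipart session itself on failure):

w, err := f.newChunkWriter(ctx, remote, size, resumableParams, in, options...)
if err != nil {
	return err
}
if err := w.Upload(ctx); err != nil {
	return err
}
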
Some files were not shown because too many files have changed in this diff.