Mirror of https://github.com/rclone/rclone.git (synced 2026-01-03 09:03:50 +00:00)

Compare commits: fix-8062-d ... fix-auth-p (267 commits)
.github/workflows/build.yml (vendored, new file, 394 lines)
@@ -0,0 +1,394 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-

name: build

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            racequicktest: true
            librclonetest: true
            deploy: true

          - job_name: linux_386
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'
            goarch: 386
            gotags: cmount
            quicktest: true

          - job_name: mac_amd64
            os: macos-latest
            go: '>=1.24.0-rc.1'
            gotags: 'cmount'
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: mac_arm64
            os: macos-latest
            go: '>=1.24.0-rc.1'
            gotags: 'cmount'
            build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
            deploy: true

          - job_name: windows
            os: windows-latest
            go: '>=1.24.0-rc.1'
            gotags: cmount
            cgo: '0'
            build_flags: '-include "^windows/"'
            build_args: '-buildmode exe'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'
            build_flags: '-exclude "^(windows/|darwin/|linux/)"'
            compile_all: true
            deploy: true

          - job_name: go1.22
            os: ubuntu-latest
            go: '1.22'
            quicktest: true
            racequicktest: true

          - job_name: go1.23
            os: ubuntu-latest
            go: '1.23'
            quicktest: true
            racequicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true

      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get update
          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
        if: matrix.os == 'ubuntu-latest'

      - name: Install Libraries on macOS
        shell: bash
        run: |
          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
          unset HOMEBREW_NO_INSTALL_FROM_API
          brew untap --force homebrew/core
          brew untap --force homebrew/cask
          brew update
          brew install --cask macfuse
          brew install git-annex git-annex-remote-rclone
        if: matrix.os == 'macos-latest'

      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'

      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env

      - name: Build rclone
        shell: bash
        run: |
          make

      - name: Rclone version
        shell: bash
        run: |
          rclone version

      - name: Run tests
        shell: bash
        run: |
          make quicktest
        if: matrix.quicktest

      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest

      - name: Run librclone tests
        shell: bash
        run: |
          make -C librclone/ctest test
          make -C librclone/ctest clean
          librclone/python/test_rclone.py
        if: matrix.librclonetest

      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all

      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
          make ci_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

  lint:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Get runner parameters
        id: get-runner-parameters
        shell: bash
        run: |
          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Go
        id: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.23.0-rc.1'
          check-latest: true
          cache: false

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/.cache/golangci-lint
          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "windows"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (macOS)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "darwin"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (FreeBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "freebsd"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (OpenBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "openbsd"
        with:
          version: latest
          skip-cache: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Scan for vulnerabilities
        run: govulncheck ./...

  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.24.0-rc.1'

      - name: Set global environment variables
        shell: bash
        run: |
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: build native rclone
        run: |
          make

      - name: install gomobile
        run: |
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

      - name: arm-v7a gomobile build
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

      - name: arm64-v8a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

      - name: x86 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

      - name: x64 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

      - name: Upload artifacts
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
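
Note: the `manual` workflow_dispatch input above feeds the `if: inputs.manual || ...` guards, so a manually dispatched run skips the repository and pull-request conditions. As an illustrative sketch only (the invocation below is an assumption, not part of the workflow file), such a run could be started with the GitHub CLI:

    gh workflow run build.yml -f manual=true
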
@@ -1,7 +1,6 @@
name: Docker beta build

on:
  workflow_dispatch:
  push:
    branches:
      - master

@@ -1,7 +1,6 @@
name: Docker release build

on:
  workflow_dispatch:
  release:
    types: [published]

@@ -33,59 +32,27 @@ jobs:
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:TESTING-latest
            rclone/rclone:TESTING-latest
            ghcr.io/rclone/rclone:TESTING-${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:TESTING-${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            ghcr.io/rclone/rclone:TESTING-${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:TESTING-${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            ghcr.io/rclone/rclone:TESTING-${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
            rclone/rclone:TESTING-${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
@@ -118,5 +85,5 @@ jobs:
          make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
          make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
        done
        make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=TESTING-latest
        make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=TESTING-${VER#v}
        make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
        make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

.github/workflows/notify.yml (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
name: Notify users based on issue labels

on:
  issues:
    types: [labeled]

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - uses: jenschelkopf/issue-label-notification-action@1.3
        with:
          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
          recipients: |
            Support Contract=@rclone/support

.github/workflows/winget.yml (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
name: Publish to Winget
on:
  release:
    types: [released]

jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: vedantmgoyal2009/winget-releaser@v2
        with:
          identifier: Rclone.Rclone
          installers-regex: '-windows-\w+\.zip$'
          token: ${{ secrets.WINGET_TOKEN }}

@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation

@@ -12,6 +12,8 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
    echo "user_allow_other" >> /etc/fuse.conf

MANUAL.html (generated, 2431 lines): file diff suppressed because it is too large
MANUAL.txt (generated, 2523 lines): file diff suppressed because it is too large

Makefile (6 lines)
@@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

backenddocs: rclone bin/make_backend_docs.py
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

rcdocs: rclone
	bin/make_rc_docs.sh

@@ -66,6 +66,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -92,6 +93,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
@@ -109,6 +111,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

RELEASE.md (25 lines)
@@ -47,13 +47,20 @@ Early in the next release cycle update the dependencies.
  * `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`
then go to manual mode. `go1.20` here is the lowest supported version

    go 1.22.0

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

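One way to drop it, as an illustrative sketch assuming Go 1.21 or later where the flag exists:

    go mod edit -toolchain=none
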
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.20 -compat=1.20
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
@@ -86,6 +93,16 @@ build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.

### Major versions

The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:

    go run github.com/icholy/gomajor@latest list -major

Expect API breakage when updating major versions.

## Tidy beta

At some point after the release run

@@ -168,6 +185,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

@@ -10,6 +10,7 @@ import (
    _ "github.com/rclone/rclone/backend/box"
    _ "github.com/rclone/rclone/backend/cache"
    _ "github.com/rclone/rclone/backend/chunker"
    _ "github.com/rclone/rclone/backend/cloudinary"
    _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
@@ -26,6 +27,7 @@ import (
    _ "github.com/rclone/rclone/backend/hdfs"
    _ "github.com/rclone/rclone/backend/hidrive"
    _ "github.com/rclone/rclone/backend/http"
    _ "github.com/rclone/rclone/backend/iclouddrive"
    _ "github.com/rclone/rclone/backend/imagekit"
    _ "github.com/rclone/rclone/backend/internetarchive"
    _ "github.com/rclone/rclone/backend/jottacloud"

File diff suppressed because it is too large
@@ -3,16 +3,149 @@
package azureblob

import (
    "context"
    "encoding/base64"
    "strings"
    "testing"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/random"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func (f *Fs) InternalTest(t *testing.T) {
    // Check first feature flags are set on this
    // remote
func TestBlockIDCreator(t *testing.T) {
    // Check creation and random number
    bic, err := newBlockIDCreator()
    require.NoError(t, err)
    bic2, err := newBlockIDCreator()
    require.NoError(t, err)
    assert.NotEqual(t, bic.random, bic2.random)
    assert.NotEqual(t, bic.random, [8]byte{})

    // Set random to known value for tests
    bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
    chunkNumber := uint64(0xFEDCBA9876543210)

    // Check creation of ID
    want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
    got := bic.newBlockID(chunkNumber)
    assert.Equal(t, want, got)
    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)

    // Test checkID is working
    assert.NoError(t, bic.checkID(chunkNumber, got))
    assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
    assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
    assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
    assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}

func (f *Fs) testFeatures(t *testing.T) {
    // Check first feature flags are set on this remote
    enabled := f.Features().SetTier
    assert.True(t, enabled)
    enabled = f.Features().GetTier
    assert.True(t, enabled)
}

type ReadSeekCloser struct {
    *strings.Reader
}

func (r *ReadSeekCloser) Close() error {
    return nil
}

// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
    var (
        containerName, blobPath = f.split(remote)
        containerClient         = f.cntSVC(containerName)
        blobClient              = containerClient.NewBlockBlobClient(blobPath)
        data                    = "uncommitted data"
        blockID                 = "1"
        blockIDBase64           = base64.StdEncoding.EncodeToString([]byte(blockID))
    )
    r := &ReadSeekCloser{strings.NewReader(data)}
    _, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
    require.NoError(t, err)

    // Verify the block is staged but not committed
    blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
    require.NoError(t, err)
    found := false
    for _, block := range blockList.UncommittedBlocks {
        if *block.Name == blockIDBase64 {
            found = true
            break
        }
    }
    require.True(t, found, "Block ID not found in uncommitted blocks")
}

// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
    var (
        ctx    = context.Background()
        remote = "testBlob"
    )

    // Multipart copy the blob please
    oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
    f.opt.UseCopyBlob = false
    f.opt.CopyCutoff = f.opt.ChunkSize
    defer func() {
        f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
    }()

    // Create a blob with uncommitted blocks
    f.stageBlockWithoutCommit(ctx, t, remote)

    // Now attempt to overwrite the block with a different sized block ID to provoke this error

    // Check the object does not exist
    _, err := f.NewObject(ctx, remote)
    require.Equal(t, fs.ErrorObjectNotFound, err)

    // Upload a multipart file over the block with uncommitted chunks of a different ID size
    size := 4*int(f.opt.ChunkSize) - 1
    contents := random.String(size)
    item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
    o := fstests.PutTestContents(ctx, t, f, &item, contents, true)

    // Check size
    assert.Equal(t, int64(size), o.Size())

    // Create a new blob with uncommitted blocks
    newRemote := "testBlob2"
    f.stageBlockWithoutCommit(ctx, t, newRemote)

    // Copy over that block
    dst, err := f.Copy(ctx, o, newRemote)
    require.NoError(t, err)

    // Check basics
    assert.Equal(t, int64(size), dst.Size())
    assert.Equal(t, newRemote, dst.Remote())

    // Check contents
    gotContents := fstests.ReadObject(ctx, t, dst, -1)
    assert.Equal(t, contents, gotContents)

    // Remove the object
    require.NoError(t, dst.Remove(ctx))
}

func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Features", f.testFeatures)
    t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

@@ -15,13 +15,17 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    name := "TestAzureBlob"
    fstests.Run(t, &fstests.Opt{
        RemoteName:  "TestAzureBlob:",
        RemoteName:  name + ":",
        NilObject:   (*Object)(nil),
        TiersToTest: []string{"Hot", "Cool", "Cold"},
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: defaultChunkSize,
        },
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "use_copy_blob", Value: "false"},
        },
    })
}

@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
        },
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "directory_markers", Value: "true"},
            {Name: name, Key: "use_copy_blob", Value: "false"},
        },
    })
}
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setCopyCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetCopyCutoffer    = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {

@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
    Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
    Advanced:  true,
    Sensitive: true,
}, {
    Name: "disable_instance_discovery",
    Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
    Default:  false,
    Advanced: true,
}, {
    Name: "use_az",
    Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
    Default:  false,
    Advanced: true,
}, {
    Name: "endpoint",
    Help: "Endpoint for the service.\n\nLeave blank normally.",
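
For illustration, `disable_instance_discovery` and `use_az` are ordinary backend options, so a remote that authenticates purely through the Azure CLI could be configured along these lines (the remote name and account value are assumed, not taken from this diff):

    [azblob]
    type = azureblob
    account = mystorageaccount
    use_az = true
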
@@ -319,10 +343,12 @@ type Options struct {
    Username                 string        `config:"username"`
    Password                 string        `config:"password"`
    ServicePrincipalFile     string        `config:"service_principal_file"`
    DisableInstanceDiscovery bool          `config:"disable_instance_discovery"`
    UseMSI                   bool          `config:"use_msi"`
    MSIObjectID              string        `config:"msi_object_id"`
    MSIClientID              string        `config:"msi_client_id"`
    MSIResourceID            string        `config:"msi_mi_res_id"`
    UseAZ                    bool          `config:"use_az"`
    Endpoint                 string        `config:"endpoint"`
    ChunkSize                fs.SizeSuffix `config:"chunk_size"`
    MaxStreamSize            fs.SizeSuffix `config:"max_stream_size"`
@@ -393,8 +419,10 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
    policyClientOptions := policy.ClientOptions{
        Transport: newTransporter(ctx),
    }
    backup := service.ShareTokenIntentBackup
    clientOpt := service.ClientOptions{
        ClientOptions: policyClientOptions,
        ClientOptions:     policyClientOptions,
        FileRequestIntent: &backup,
    }

    // Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -412,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
        }
        // Read credentials from the environment
        options := azidentity.DefaultAzureCredentialOptions{
            ClientOptions: policyClientOptions,
            ClientOptions:            policyClientOptions,
            DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
        }
        cred, err = azidentity.NewDefaultAzureCredential(&options)
        if err != nil {
@@ -423,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
        if err != nil {
            return nil, fmt.Errorf("create new shared key credential failed: %w", err)
        }
    case opt.UseAZ:
        var options = azidentity.AzureCLICredentialOptions{}
        cred, err = azidentity.NewAzureCLICredential(&options)
        fmt.Println(cred)
        if err != nil {
            return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
        }
    case opt.SASURL != "":
        client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
        if err != nil {
@@ -897,7 +933,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request becaue the [fs.List] method does not
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
    if ty != hash.MD5 {

@@ -42,9 +42,10 @@ type Bucket struct {

// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
    DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
    DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
    FileNamePrefix            string `json:"fileNamePrefix"`
    DaysFromHidingToDeleting                        *int   `json:"daysFromHidingToDeleting"`
    DaysFromUploadingToHiding                       *int   `json:"daysFromUploadingToHiding"`
    DaysFromStartingToCancelingUnfinishedLargeFiles *int   `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
    FileNamePrefix                                  string `json:"fileNamePrefix"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base

@@ -30,6 +30,7 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
@@ -1318,16 +1319,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
            // Check current version of the file
            if deleteHidden && object.Action == "hide" {
                fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
                toBeDeleted <- object
                if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
                    toBeDeleted <- object
                }
            } else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
                fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
                toBeDeleted <- object
                if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
                    toBeDeleted <- object
                }
            } else {
                fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
            }
        } else {
            fs.Debugf(remote, "Deleting (id %q)", object.ID)
            toBeDeleted <- object
            if !operations.SkipDestructive(ctx, object.Name, "delete") {
                toBeDeleted <- object
            }
        }
        last = remote
        tr.Done(ctx, nil)
@@ -2231,6 +2238,7 @@ This will dump something like this showing the lifecycle rules.
    {
        "daysFromHidingToDeleting": 1,
        "daysFromUploadingToHiding": null,
        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
        "fileNamePrefix": ""
    }
]
@@ -2257,8 +2265,9 @@ overwrites will still cause versions to be made.
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
    Opts: map[string]string{
        "daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
        "daysFromUploadingToHiding": "This many days after uploading a file is hidden",
        "daysFromHidingToDeleting":                        "After a file has been hidden for this many days it is deleted. 0 is off.",
        "daysFromUploadingToHiding":                       "This many days after uploading a file is hidden",
        "daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
    },
}

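For illustration, the new rule is driven through the existing `lifecycle` backend command; the remote and bucket names below are assumed, not taken from this diff:

    rclone backend lifecycle b2:bucket -o daysFromStartingToCancelingUnfinishedLargeFiles=1
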
@@ -2278,14 +2287,23 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
        }
        newRule.DaysFromUploadingToHiding = &days
    }
    if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
        days, err := strconv.Atoi(daysStr)
        if err != nil {
            return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
        }
        newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
    }
    bucketName, _ := f.split("")
    if bucketName == "" {
        return nil, errors.New("bucket required")

    }

    skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")

    var bucket *api.Bucket
    if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
    if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
        bucketID, err := f.getBucketID(ctx, bucketName)
        if err != nil {
            return nil, err

@@ -5,6 +5,7 @@ import (
    "crypto/sha1"
    "fmt"
    "path"
    "sort"
    "strings"
    "testing"
    "time"
@@ -13,6 +14,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/bucket"
@@ -463,24 +465,161 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
    })

    t.Run("Cleanup", func(t *testing.T) {
        require.NoError(t, f.cleanUp(ctx, true, false, 0))
        items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
        fstest.CheckListing(t, f, items)
        // Set --b2-versions for this test
        f.opt.Versions = true
        defer func() {
            f.opt.Versions = false
        }()
        fstest.CheckListing(t, f, items)
        t.Run("DryRun", func(t *testing.T) {
            f.opt.Versions = true
            defer func() {
                f.opt.Versions = false
            }()
            // Listing should be unchanged after dry run
            before := listAllFiles(ctx, t, f, dirName)
            ctx, ci := fs.AddConfig(ctx)
            ci.DryRun = true
            require.NoError(t, f.cleanUp(ctx, true, false, 0))
            after := listAllFiles(ctx, t, f, dirName)
            assert.Equal(t, before, after)
        })

        t.Run("RealThing", func(t *testing.T) {
            f.opt.Versions = true
            defer func() {
                f.opt.Versions = false
            }()
            // Listing should reflect current state after cleanup
            require.NoError(t, f.cleanUp(ctx, true, false, 0))
            items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
            fstest.CheckListing(t, f, items)
        })
    })

    // Purge gets tested later
}

func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
    ctx := context.Background()

    // B2CleanupHidden tests cleaning up hidden files
    t.Run("CleanupUnfinished", func(t *testing.T) {
        dirName := "unfinished"
        fileCount := 5
        expectedFiles := []string{}
        for i := 1; i < fileCount; i++ {
            fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
            expectedFiles = append(expectedFiles, fileName)
            obj := &Object{
                fs:     f,
                remote: fileName,
            }
            objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
            _, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
            require.NoError(t, err)
        }
        checkListing(ctx, t, f, dirName, expectedFiles)

        t.Run("DryRun", func(t *testing.T) {
            // Listing should not change after dry run
            ctx, ci := fs.AddConfig(ctx)
            ci.DryRun = true
            require.NoError(t, f.cleanUp(ctx, false, true, 0))
            checkListing(ctx, t, f, dirName, expectedFiles)
        })

        t.Run("RealThing", func(t *testing.T) {
            // Listing should be empty after real cleanup
            require.NoError(t, f.cleanUp(ctx, false, true, 0))
            checkListing(ctx, t, f, dirName, []string{})
        })
    })
}

func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
    bucket, directory := f.split(dirName)
    foundFiles := []string{}
    require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
        if !isDirectory {
            foundFiles = append(foundFiles, object.Name)
        }
        return nil
    }))
    sort.Strings(foundFiles)
    return foundFiles
}

func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
    foundFiles := listAllFiles(ctx, t, f, dirName)
    sort.Strings(expectedFiles)
    assert.Equal(t, expectedFiles, foundFiles)
}

func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
    ctx := context.Background()

    opt := map[string]string{}

    t.Run("InitState", func(t *testing.T) {
        // There should be no lifecycle rules at the outset
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))
    })

    t.Run("DryRun", func(t *testing.T) {
        // There should still be no lifecycle rules after each dry run operation
        ctx, ci := fs.AddConfig(ctx)
        ci.DryRun = true

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))

        delete(opt, "daysFromHidingToDeleting")
        opt["daysFromUploadingToHiding"] = "40"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))
    })

    t.Run("RealThing", func(t *testing.T) {
        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

        delete(opt, "daysFromHidingToDeleting")
        opt["daysFromUploadingToHiding"] = "40"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
        assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
    })
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Metadata", f.InternalTestMetadata)
    t.Run("Versions", f.InternalTestVersions)
    t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
    t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -43,9 +43,9 @@ import (
    "github.com/rclone/rclone/lib/jwtutil"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/random"
    "github.com/rclone/rclone/lib/rest"
    "github.com/youmark/pkcs8"
    "golang.org/x/oauth2"
)

const (
@@ -64,12 +64,10 @@ const (
// Globals
var (
    // Description of how to auth for this app
    oauthConfig = &oauth2.Config{
        Scopes: nil,
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://app.box.com/api/oauth2/authorize",
            TokenURL: "https://app.box.com/api/oauth2/token",
        },
    oauthConfig = &oauthutil.Config{
        Scopes:       nil,
        AuthURL:      "https://app.box.com/api/oauth2/authorize",
        TokenURL:     "https://app.box.com/api/oauth2/token",
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectURL,
@@ -256,8 +254,10 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}

func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {

    block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
    if block == nil {
        return nil, errors.New("box: failed to PEM decode private key")
    }
    if len(rest) > 0 {
        return nil, fmt.Errorf("box: extra data included in private key: %w", err)
    }
@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        //fmt.Printf("...Error %v\n", err)
        // fmt.Printf("...Error %v\n", err)
        return "", err
    }
    // fmt.Printf("...Id %q\n", *info.Id)
@@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, err
    }

    // check if dest already exists
    item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
    if err != nil {
        return nil, err
    }
    if item != nil { // dest already exists, need to copy to temp name and then move
        tempSuffix := "-rclone-copy-" + random.String(8)
        fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
        tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
        if err != nil {
            return nil, err
        }
        fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
        err = f.deleteObject(ctx, item.ID)
        if err != nil {
            return nil, err
        }
        return f.Move(ctx, tempObj, remote)
    }

    // Copy the object
    opts := rest.Opts{
        Method: "POST",

backend/cloudinary/api/types.go (new file, 48 lines)
@@ -0,0 +1,48 @@
// Package api has type definitions for cloudinary
package api

import (
	"fmt"
)

// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
	// FromStandardPath takes a / separated path in Standard encoding
	// and converts it to a / separated path in this encoding.
	FromStandardPath(string) string
	// FromStandardName takes name in Standard encoding and converts
	// it in this encoding.
	FromStandardName(string) string
	// ToStandardPath takes a / separated path in this encoding
	// and converts it to a / separated path in Standard encoding.
	ToStandardPath(string) string
	// ToStandardName takes name in this encoding and converts
	// it in Standard encoding.
	ToStandardName(string) string
	// FromStandardFullPath takes a / separated full path (remote root included) in Standard encoding and converts it to this encoding.
	FromStandardFullPath(string) string
}

// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
	PublicID     string
	ResourceType string
	DeliveryType string
	AssetFolder  string
	DisplayName  string
}

// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
	return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
	return false
}

// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
	return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
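UpdateOptions piggybacks on rclone's generic fs.OpenOption mechanism so that Update can hand the existing asset's identity down to Put. A minimal sketch of how such an option is picked out of the option slice; the helper name is hypothetical, cloudinary.go does the same type assertion inline in Put:

    package cloudinary

    import (
        "github.com/rclone/rclone/backend/cloudinary/api"
        "github.com/rclone/rclone/fs"
    )

    // findUpdateOptions returns the backend-specific *api.UpdateOptions
    // if one was passed among the generic open options.
    func findUpdateOptions(options []fs.OpenOption) (*api.UpdateOptions, bool) {
        for _, option := range options {
            if uo, ok := option.(*api.UpdateOptions); ok {
                return uo, true
            }
        }
        return nil, false
    }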
backend/cloudinary/cloudinary.go (new file, 711 lines)
@@ -0,0 +1,711 @@
|
||||
// Package cloudinary provides an interface to the Cloudinary DAM
|
||||
package cloudinary
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudinary/cloudinary-go/v2"
|
||||
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/admin"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
|
||||
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
|
||||
"github.com/rclone/rclone/backend/cloudinary/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/zeebo/blake3"
|
||||
)
|
||||
|
||||
// cldPathDir is like path.Dir but returns "" instead of "." because Cloudinary shouldn't get a "." when there is no path
|
||||
func cldPathDir(somePath string) string {
|
||||
if somePath == "" || somePath == "." {
|
||||
return somePath
|
||||
}
|
||||
dir := path.Dir(somePath)
|
||||
if dir == "." {
|
||||
return ""
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "cloudinary",
|
||||
Description: "Cloudinary",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "cloud_name",
|
||||
Help: "Cloudinary Environment Name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "Cloudinary API Key",
|
||||
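The suggested public ID is therefore a deterministic BLAKE3-256 digest of "<asset_folder>/<display_name>" (the modTime parameter is currently unused), so re-uploading the same rclone path always targets the same Cloudinary public ID. A standalone sketch of the same computation, with made-up example values:

    package main

    import (
        "encoding/hex"
        "fmt"
        "path"

        "github.com/zeebo/blake3"
    )

    func main() {
        // assumed example folder and display name
        payload := []byte(path.Join("rclone-test", "hello.txt"))
        sum := blake3.Sum256(payload)
        fmt.Println(hex.EncodeToString(sum[:])) // 64 hex characters
    }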
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "api_secret",
|
||||
Help: "Cloudinary API Secret",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "upload_prefix",
|
||||
Help: "Specify the API endpoint for environments out of the US",
|
||||
},
|
||||
{
|
||||
Name: "upload_preset",
|
||||
Help: "Upload Preset to select asset manipulation on upload",
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeAsterisk |
|
||||
encoder.EncodePipe |
|
||||
encoder.EncodeHash |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeDot),
|
||||
},
|
||||
{
|
||||
Name: "eventually_consistent_delay",
|
||||
Default: fs.Duration(0),
|
||||
Advanced: true,
|
||||
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
CloudName string `config:"cloud_name"`
|
||||
APIKey string `config:"api_key"`
|
||||
APISecret string `config:"api_secret"`
|
||||
UploadPrefix string `config:"upload_prefix"`
|
||||
UploadPreset string `config:"upload_preset"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
|
||||
}
|
||||
|
||||
// Fs represents a remote cloudinary server
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
pacer *fs.Pacer
|
||||
srv *rest.Client // For downloading assets via the Cloudinary CDN
|
||||
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
|
||||
lastCRUD time.Time
|
||||
}
|
||||
|
||||
// Object describes a cloudinary object
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
url string
|
||||
md5sum string
|
||||
publicID string
|
||||
resourceType string
|
||||
deliveryType string
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the Cloudinary client
|
||||
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
|
||||
}
|
||||
cld.Admin.Client = *fshttp.NewClient(ctx)
|
||||
cld.Upload.Client = *fshttp.NewClient(ctx)
|
||||
if opt.UploadPrefix != "" {
|
||||
cld.Config.API.UploadPrefix = opt.UploadPrefix
|
||||
}
|
||||
client := fshttp.NewClient(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
cld: cld,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
|
||||
srv: rest.NewClient(client),
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = cldPathDir(root)
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
||||
// File doesn't exist so return the previous root
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// FromStandardPath implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) FromStandardPath(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
|
||||
}
|
||||
|
||||
// FromStandardName implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) FromStandardName(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
|
||||
}
|
||||
|
||||
// ToStandardPath implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) ToStandardPath(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
|
||||
}
|
||||
|
||||
// ToStandardName implementation of the api.CloudinaryEncoder
|
||||
func (f *Fs) ToStandardName(s string) string {
|
||||
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
|
||||
}
|
||||
|
||||
// FromStandardFullPath encodes a full path to Cloudinary standard
|
||||
func (f *Fs) FromStandardFullPath(dir string) string {
|
||||
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
|
||||
}
|
||||
|
||||
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
|
||||
func (f *Fs) ToAssetFolderAPI(dir string) string {
|
||||
return strings.ReplaceAll(dir, "%", "%25")
|
||||
}
|
||||
|
||||
// ToDisplayNameElastic escapes characters which are special to the Elasticsearch-based search API
|
||||
func (f *Fs) ToDisplayNameElastic(dir string) string {
|
||||
return strings.ReplaceAll(dir, "!", "\\!")
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// WaitEventuallyConsistent waits till the FS is eventually consistent
|
||||
func (f *Fs) WaitEventuallyConsistent() {
|
||||
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
|
||||
return
|
||||
}
|
||||
delay := time.Duration(f.opt.EventuallyConsistentDelay)
|
||||
timeSinceLastCRUD := time.Since(f.lastCRUD)
|
||||
if timeSinceLastCRUD < delay {
|
||||
time.Sleep(delay - timeSinceLastCRUD)
|
||||
}
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Cloudinary root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
remotePrefix := f.FromStandardFullPath(dir)
|
||||
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
|
||||
remotePrefix += "/"
|
||||
}
|
||||
|
||||
var entries fs.DirEntries
|
||||
dirs := make(map[string]struct{})
|
||||
nextCursor := ""
|
||||
f.WaitEventuallyConsistent()
|
||||
for {
|
||||
// Use the folders API to list folders.
|
||||
folderParams := admin.SubFoldersParams{
|
||||
Folder: f.ToAssetFolderAPI(remotePrefix),
|
||||
MaxResults: 500,
|
||||
}
|
||||
if nextCursor != "" {
|
||||
folderParams.NextCursor = nextCursor
|
||||
}
|
||||
|
||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
|
||||
}
|
||||
if results.Error.Message != "" {
|
||||
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
|
||||
}
|
||||
|
||||
for _, folder := range results.Folders {
|
||||
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
|
||||
parts := strings.Split(relativePath, "/")
|
||||
|
||||
// It's a directory
|
||||
dirName := parts[len(parts)-1]
|
||||
if _, found := dirs[dirName]; !found {
|
||||
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
|
||||
entries = append(entries, d)
|
||||
dirs[dirName] = struct{}{}
|
||||
}
|
||||
}
|
||||
// Break if there are no more results
|
||||
if results.NextCursor == "" {
|
||||
break
|
||||
}
|
||||
nextCursor = results.NextCursor
|
||||
}
|
||||
|
||||
for {
|
||||
// Use the assets.AssetsByAssetFolder API to list assets
|
||||
assetsParams := admin.AssetsByAssetFolderParams{
|
||||
AssetFolder: remotePrefix,
|
||||
MaxResults: 500,
|
||||
}
|
||||
if nextCursor != "" {
|
||||
assetsParams.NextCursor = nextCursor
|
||||
}
|
||||
|
||||
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list assets: %w", err)
|
||||
}
|
||||
|
||||
for _, asset := range results.Assets {
|
||||
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
|
||||
if dir != "" {
|
||||
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(asset.Bytes),
|
||||
modTime: asset.CreatedAt,
|
||||
url: asset.SecureURL,
|
||||
publicID: asset.PublicID,
|
||||
resourceType: asset.AssetType,
|
||||
deliveryType: asset.Type,
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
|
||||
// Break if there are no more results
|
||||
if results.NextCursor == "" {
|
||||
break
|
||||
}
|
||||
nextCursor = results.NextCursor
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
searchParams := search.Query{
|
||||
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
|
||||
f.FromStandardFullPath(cldPathDir(remote)),
|
||||
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
|
||||
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
|
||||
MaxResults: 2,
|
||||
}
|
||||
var results *admin.SearchResult
|
||||
f.WaitEventuallyConsistent()
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err1 error
|
||||
results, err1 = f.cld.Admin.Search(ctx, searchParams)
|
||||
if err1 == nil && results.TotalCount != len(results.Assets) {
|
||||
err1 = errors.New("partial response so waiting for eventual consistency")
|
||||
}
|
||||
return shouldRetry(ctx, nil, err1)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if results.TotalCount == 0 || len(results.Assets) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
asset := results.Assets[0]
|
||||
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(asset.Bytes),
|
||||
modTime: asset.UploadedAt,
|
||||
url: asset.SecureURL,
|
||||
md5sum: asset.Etag,
|
||||
publicID: asset.PublicID,
|
||||
resourceType: asset.ResourceType,
|
||||
deliveryType: asset.Type,
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
|
||||
payload := []byte(path.Join(assetFolder, displayName))
|
||||
hash := blake3.Sum256(payload)
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// Put uploads content to Cloudinary
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if src.Size() == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
params := uploader.UploadParams{
|
||||
UploadPreset: f.opt.UploadPreset,
|
||||
}
|
||||
|
||||
updateObject := false
|
||||
var modTime time.Time
|
||||
for _, option := range options {
|
||||
if updateOptions, ok := option.(*api.UpdateOptions); ok {
|
||||
if updateOptions.PublicID != "" {
|
||||
updateObject = true
|
||||
params.Overwrite = SDKApi.Bool(true)
|
||||
params.Invalidate = SDKApi.Bool(true)
|
||||
params.PublicID = updateOptions.PublicID
|
||||
params.ResourceType = updateOptions.ResourceType
|
||||
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
|
||||
params.AssetFolder = updateOptions.AssetFolder
|
||||
params.DisplayName = updateOptions.DisplayName
|
||||
modTime = src.ModTime(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !updateObject {
|
||||
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
|
||||
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
|
||||
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
|
||||
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
|
||||
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
|
||||
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
|
||||
}
|
||||
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
|
||||
}
|
||||
if !updateObject {
|
||||
modTime = uploadResult.CreatedAt
|
||||
}
|
||||
if uploadResult.Error.Message != "" {
|
||||
return nil, errors.New(uploadResult.Error.Message)
|
||||
}
|
||||
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
size: int64(uploadResult.Bytes),
|
||||
modTime: modTime,
|
||||
url: uploadResult.SecureURL,
|
||||
md5sum: uploadResult.Etag,
|
||||
publicID: uploadResult.PublicID,
|
||||
resourceType: uploadResult.ResourceType,
|
||||
deliveryType: uploadResult.Type,
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision of the remote
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// Mkdir creates empty folders
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
|
||||
res, err := f.cld.Admin.CreateFolder(ctx, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.Error.Message != "" {
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir deletes empty folders
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// Additional test because Cloudinary will delete folders without
|
||||
// assets, regardless of empty sub-folders
|
||||
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
|
||||
folderParams := admin.SubFoldersParams{
|
||||
Folder: folder,
|
||||
MaxResults: 1,
|
||||
}
|
||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if results.TotalCount > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
params := admin.DeleteFolderParams{Folder: folder}
|
||||
res, err := f.cld.Admin.DeleteFolder(ctx, params)
|
||||
f.lastCRUD = time.Now()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.Error.Message != "" {
|
||||
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
420, // Too Many Requests (legacy)
|
||||
429, // Too Many Requests
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
if err != nil {
|
||||
tryAgain := "Try again on "
|
||||
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
|
||||
layout := "2006-01-02 15:04:05 UTC"
|
||||
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
|
||||
timestamp, err2 := time.Parse(layout, dateStr)
|
||||
if err2 == nil {
|
||||
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
|
||||
}
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Retrying API error %v", err)
|
||||
return true, err
|
||||
}
|
||||
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
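Cloudinary's rate-limit errors embed the time at which the limit resets in the message text, e.g. "... Try again on 2030-01-02 15:04:05 UTC". A small standalone sketch of the same parsing logic; the message text is an assumed example, not captured from the API:

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    func main() {
        msg := "Rate limit exceeded. Try again on 2030-01-02 15:04:05 UTC"
        const tryAgain = "Try again on "
        const layout = "2006-01-02 15:04:05 UTC"
        if idx := strings.Index(msg, tryAgain); idx != -1 {
            dateStr := msg[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
            if ts, err := time.Parse(layout, dateStr); err == nil {
                fmt.Println("retry after:", time.Until(ts).Round(time.Second))
            }
        }
    }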
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Hash returns the MD5 of an object
|
||||
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
|
||||
if ty != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.md5sum, nil
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size of object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.url,
|
||||
Options: options,
|
||||
}
|
||||
var offset int64
|
||||
var count int64
|
||||
var key string
|
||||
var value string
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
key, value = option.Header()
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
key, value = option.Header()
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if key != "" && value != "" {
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders[key] = value
|
||||
}
|
||||
// Make sure that the asset is fully available
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
if err == nil {
|
||||
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
|
||||
if clErr == nil && count == int64(cl) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
options = append(options, &api.UpdateOptions{
|
||||
PublicID: o.publicID,
|
||||
ResourceType: o.resourceType,
|
||||
DeliveryType: o.deliveryType,
|
||||
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
|
||||
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
|
||||
})
|
||||
updatedObj, err := o.fs.Put(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if uo, ok := updatedObj.(*Object); ok {
|
||||
o.size = uo.size
|
||||
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
|
||||
o.url = uo.url
|
||||
o.md5sum = uo.md5sum
|
||||
o.publicID = uo.publicID
|
||||
o.resourceType = uo.resourceType
|
||||
o.deliveryType = uo.deliveryType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := uploader.DestroyParams{
|
||||
PublicID: o.publicID,
|
||||
ResourceType: o.resourceType,
|
||||
Type: o.deliveryType,
|
||||
}
|
||||
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
|
||||
o.fs.lastCRUD = time.Now()
|
||||
if dErr != nil {
|
||||
return dErr
|
||||
}
|
||||
|
||||
if res.Error.Message != "" {
|
||||
return errors.New(res.Error.Message)
|
||||
}
|
||||
|
||||
if res.Result != "ok" {
|
||||
return errors.New(res.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
backend/cloudinary/cloudinary_test.go (new file, 23 lines)
@@ -0,0 +1,23 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cloudinary"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestCloudinary"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*cloudinary.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
@@ -80,9 +80,10 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
driveConfig = &oauth2.Config{
|
||||
driveConfig = &oauthutil.Config{
|
||||
Scopes: []string{scopePrefix + "drive"},
|
||||
Endpoint: google.Endpoint,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
@@ -120,6 +121,7 @@ var (
|
||||
"text/html": ".html",
|
||||
"text/plain": ".txt",
|
||||
"text/tab-separated-values": ".tsv",
|
||||
"text/markdown": ".md",
|
||||
}
|
||||
_mimeTypeToExtensionLinks = map[string]string{
|
||||
"application/x-link-desktop": ".desktop",
|
||||
@@ -3523,14 +3525,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
||||
return f.unTrash(ctx, dir, directoryID, true)
|
||||
}
|
||||
|
||||
// copy file with id to dest
|
||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
// copy or move file with id to dest
|
||||
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
|
||||
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't find id: %w", err)
|
||||
}
|
||||
if info.MimeType == driveFolderType {
|
||||
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
|
||||
}
|
||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||
@@ -3551,14 +3553,21 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy failed: %w", err)
|
||||
|
||||
var opErr error
|
||||
if operation == "moveid" {
|
||||
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
|
||||
} else {
|
||||
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||
}
|
||||
if opErr != nil {
|
||||
return fmt.Errorf("%s failed: %w", operation, opErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
|
||||
// Run the drive query calling fn on each entry found
|
||||
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
|
||||
list := f.svc.Files.List()
|
||||
if query != "" {
|
||||
list.Q(query)
|
||||
@@ -3577,10 +3586,7 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
|
||||
if f.rootFolderID == "appDataFolder" {
|
||||
list.Spaces("appDataFolder")
|
||||
}
|
||||
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
|
||||
|
||||
var results []*drive.File
|
||||
for {
|
||||
var files *drive.FileList
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -3588,20 +3594,66 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %w", err)
|
||||
return fmt.Errorf("failed to execute query: %w", err)
|
||||
}
|
||||
if files.IncompleteSearch {
|
||||
fs.Errorf(f, "search result INCOMPLETE")
|
||||
}
|
||||
results = append(results, files.Files...)
|
||||
for _, item := range files.Files {
|
||||
fn(item)
|
||||
}
|
||||
if files.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
list.PageToken(files.NextPageToken)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run the drive query returning the entries found
|
||||
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
|
||||
var results []*drive.File
|
||||
err = f.queryFn(ctx, query, func(item *drive.File) {
|
||||
results = append(results, item)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// Rescue, list or delete orphaned files
|
||||
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
|
||||
return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
|
||||
if len(item.Parents) != 0 {
|
||||
return
|
||||
}
|
||||
// Have found an orphaned entry
|
||||
if delete {
|
||||
fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
|
||||
err = f.delete(ctx, item.Id, true)
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
|
||||
}
|
||||
} else if dirID == "" {
|
||||
operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
|
||||
} else {
|
||||
fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Files.Update(item.Id, nil).
|
||||
AddParents(dirID).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "get",
|
||||
Short: "Get command for fetching the drive config parameters",
|
||||
@@ -3746,6 +3798,28 @@ attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
|
||||
`,
|
||||
}, {
|
||||
Name: "moveid",
|
||||
Short: "Move files by ID",
|
||||
Long: `This command moves files by ID
|
||||
|
||||
Usage:
|
||||
|
||||
rclone backend moveid drive: ID path
|
||||
rclone backend moveid drive: ID1 path1 ID2 path2
|
||||
|
||||
It moves the drive file with ID given to the path (an rclone path which
|
||||
will be passed internally to rclone moveto).
|
||||
|
||||
The path should end with a / to indicate that the file should be moved
as named to this directory. If it doesn't end with a / then the last
path component will be used as the file name.
|
||||
|
||||
If the destination is a drive backend then server-side moving will be
|
||||
attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
|
||||
`,
|
||||
}, {
|
||||
Name: "exportformats",
|
||||
Short: "Dump the export formats for debug purposes",
|
||||
@@ -3793,6 +3867,37 @@ The result is a JSON array of matches, for example:
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
}, {
|
||||
Name: "rescue",
|
||||
Short: "Rescue or delete any orphaned files",
|
||||
Long: `This command rescues or deletes any orphaned files or directories.
|
||||
|
||||
Sometimes files can get orphaned in Google Drive. This means that they
|
||||
are no longer in any folder in Google Drive.
|
||||
|
||||
This command finds those files and either rescues them to a directory
|
||||
you specify or deletes them.
|
||||
|
||||
Usage:
|
||||
|
||||
This can be used in 3 ways.
|
||||
|
||||
First, list all orphaned files
|
||||
|
||||
rclone backend rescue drive:
|
||||
|
||||
Second, rescue all orphaned files to the directory indicated
|
||||
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
|
||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
||||
|
||||
rclone backend rescue drive: Orphans
|
||||
|
||||
Third, delete all orphaned files to the trash
|
||||
|
||||
rclone backend rescue drive: -o delete
|
||||
`,
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -3893,16 +3998,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
dir = arg[0]
|
||||
}
|
||||
return f.unTrashDir(ctx, dir, true)
|
||||
case "copyid":
|
||||
case "copyid", "moveid":
|
||||
if len(arg)%2 != 0 {
|
||||
return nil, errors.New("need an even number of arguments")
|
||||
}
|
||||
for len(arg) > 0 {
|
||||
id, dest := arg[0], arg[1]
|
||||
arg = arg[2:]
|
||||
err = f.copyID(ctx, id, dest)
|
||||
err = f.copyOrMoveID(ctx, name, id, dest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
|
||||
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
@@ -3921,6 +4026,22 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
if len(arg) == 0 {
|
||||
// no arguments - list only
|
||||
} else if !delete && len(arg) == 1 {
|
||||
dir := arg[0]
|
||||
dirID, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
|
||||
}
|
||||
fs.Infof(f, "Rescuing orphans into %q", dir)
|
||||
} else {
|
||||
return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
|
||||
}
|
||||
return nil, f.rescue(ctx, dirID, delete)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
|
||||
wantErr error
|
||||
}{
|
||||
{"doc", []string{".doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
|
||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||
} {
|
||||
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
||||
require.NoError(t, f.Purge(ctx, "trashDir"))
|
||||
}
|
||||
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
|
||||
func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
|
||||
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
obj, err := f.NewObject(ctx, existingFile)
|
||||
require.NoError(t, err)
|
||||
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("BadID", func(t *testing.T) {
|
||||
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
|
||||
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "couldn't find id")
|
||||
})
|
||||
@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
t.Run("Directory", func(t *testing.T) {
|
||||
rootID, err := f.dirCache.RootID(ctx, false)
|
||||
require.NoError(t, err)
|
||||
err = f.copyID(ctx, rootID, dir+"/")
|
||||
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "can't copy directory")
|
||||
assert.Contains(t, err.Error(), "can't moveid directory")
|
||||
})
|
||||
|
||||
t.Run("WithoutDestName", func(t *testing.T) {
|
||||
err = f.copyID(ctx, o.id, dir+"/")
|
||||
t.Run("MoveWithoutDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
|
||||
require.NoError(t, err)
|
||||
checkFile(path.Base(existingFile))
|
||||
})
|
||||
|
||||
t.Run("WithDestName", func(t *testing.T) {
|
||||
err = f.copyID(ctx, o.id, dir+"/potato.txt")
|
||||
t.Run("CopyWithoutDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
|
||||
require.NoError(t, err)
|
||||
checkFile(path.Base(existingFile))
|
||||
})
|
||||
|
||||
t.Run("MoveWithDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
|
||||
require.NoError(t, err)
|
||||
checkFile("potato.txt")
|
||||
})
|
||||
|
||||
t.Run("CopyWithDestName", func(t *testing.T) {
|
||||
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
|
||||
require.NoError(t, err)
|
||||
checkFile("potato.txt")
|
||||
})
|
||||
@@ -647,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
})
|
||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||
t.Run("UnTrash", f.InternalTestUnTrash)
|
||||
t.Run("CopyID", f.InternalTestCopyID)
|
||||
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
|
||||
t.Run("Query", f.InternalTestQuery)
|
||||
t.Run("AgeQuery", f.InternalTestAgeQuery)
|
||||
t.Run("ShouldRetry", f.InternalTestShouldRetry)
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
)
|
||||
|
||||
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
||||
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
err = fserrors.NoRetryError(err)
|
||||
return false, err
|
||||
}
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// after the first chunk is uploaded, we retry everything
|
||||
// after the first chunk is uploaded, we retry everything except the excluded errors
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -47,6 +47,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
@@ -93,7 +94,7 @@ const (
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
dropboxConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"files.metadata.write",
|
||||
"files.content.write",
|
||||
@@ -108,7 +109,8 @@ var (
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
|
||||
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -133,7 +135,7 @@ var (
|
||||
)
|
||||
|
||||
// Gets an oauth config with the right scopes
|
||||
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
|
||||
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
|
||||
// If not impersonating, use standard scopes
|
||||
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
|
||||
return dropboxConfig
|
||||
@@ -316,32 +318,46 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// Some specific errors which should be excluded from retries
|
||||
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, err
|
||||
}
|
||||
errString := err.Error()
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// First check for specific errors
|
||||
//
|
||||
// These come back from the SDK in a whole host of different
|
||||
// error types, but there doesn't seem to be a consistent way
|
||||
// of reading the error cause, so here we just check using the
|
||||
// error string which isn't perfect but does the job.
|
||||
errString := err.Error()
|
||||
if strings.Contains(errString, "insufficient_space") {
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if strings.Contains(errString, "malformed_path") {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// Then handle any official Retry-After header from Dropbox's SDK
|
||||
switch e := err.(type) {
|
||||
case auth.RateLimitAPIError:
|
||||
if e.RateLimitError.RetryAfter > 0 {
|
||||
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
|
||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behavior for backward compatibility
|
||||
errString := err.Error()
|
||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
||||
return true, err
|
||||
}
|
||||
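The refactor splits the retry decision in two: shouldRetryExclude holds the errors that must never be retried (insufficient_space is fatal, malformed_path is a no-retry caller error), and shouldRetry layers the rate-limit handling on top. A minimal standalone sketch of that shape, using only the error strings named above; the helper is illustrative, not the real SDK types:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // classify mirrors the two-stage decision: exclusions first, then
    // the generic "retry on rate limit" behaviour.
    func classify(err error) (retry bool, reason string) {
        if err == nil {
            return false, "no error"
        }
        s := err.Error()
        switch {
        case strings.Contains(s, "insufficient_space"):
            return false, "fatal"
        case strings.Contains(s, "malformed_path"):
            return false, "no retry"
        case strings.Contains(s, "too_many_requests"),
            strings.Contains(s, "too_many_write_operations"):
            return true, "rate limited"
        }
        return true, "default retry"
    }

    func main() {
        retry, reason := classify(errors.New("path/malformed_path/"))
        fmt.Println(retry, reason) // false no retry
    }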
@@ -1020,13 +1036,20 @@ func (f *Fs) Precision() time.Duration {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Find and remove existing object
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
@@ -1040,7 +1063,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
|
||||
},
|
||||
}
|
||||
var err error
|
||||
var result *files.RelocationResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
result, err = f.srv.CopyV2(&arg)
|
||||
@@ -1692,14 +1714,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
err = fserrors.NoRetryError(err)
|
||||
return false, err
|
||||
}
|
||||
if retry, err := shouldRetryExclude(ctx, err); !retry {
|
||||
return retry, err
|
||||
}
|
||||
// after the first chunk is uploaded, we retry everything
|
||||
// after the first chunk is uploaded, we retry everything except the excluded errors
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
|
||||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_upload",
|
||||
Default: false,
|
||||
Help: `Don't check the upload is OK
|
||||
|
||||
Normally rclone will try to check the upload exists after it has
|
||||
uploaded a file to make sure the size and modification time are as
|
||||
expected.
|
||||
|
||||
This flag stops rclone doing these checks. This enables uploading to
|
||||
folders which are write only.
|
||||
|
||||
You will likely also need to use the --inplace flag if uploading to
a write only folder.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
@@ -232,6 +248,7 @@ type Options struct {
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
NoCheckUpload bool `config:"no_check_upload"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
@@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return fmt.Errorf("update stor: %w", err)
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if o.fs.opt.NoCheckUpload {
|
||||
o.info = &FileInfo{
|
||||
Name: o.remote,
|
||||
Size: uint64(src.Size()),
|
||||
ModTime: src.ModTime(ctx),
|
||||
precise: true,
|
||||
IsDir: false,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
|
||||
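The new no_check_upload flag is what makes write-only FTP drop folders usable: rclone skips the post-upload size/modtime check and records the source's size and modtime locally instead. A hypothetical remote configuration and transfer (host and paths assumed) would look like:

    [ftp-writeonly]
    type = ftp
    host = ftp.example.com
    user = uploader
    no_check_upload = true

    rclone copy --inplace /local/files ftp-writeonly:incoming

As the help text notes, --inplace is usually needed as well when the target folder is write only.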
@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
@@ -1228,6 +1228,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
|
||||
}
|
||||
|
||||
// Find existing object
|
||||
existingObj, err := f.NewObject(ctx, remote)
|
||||
if err == nil {
|
||||
defer func() {
|
||||
// Don't remove existing object if returning an error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
|
||||
err = existingObj.Remove(ctx)
|
||||
}()
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
||||
if err != nil {
|
||||
|
||||
@@ -62,9 +62,10 @@ const (
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
storageConfig = &oauth2.Config{
|
||||
storageConfig = &oauthutil.Config{
|
||||
Scopes: []string{storage.DevstorageReadWriteScope},
|
||||
Endpoint: google.Endpoint,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
@@ -106,6 +107,12 @@ func init() {
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_token",
|
||||
Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Sensitive: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
@@ -379,6 +386,7 @@ type Options struct {
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
AccessToken string `config:"access_token"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -535,6 +543,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
} else if opt.AccessToken != "" {
|
||||
ts := oauth2.Token{AccessToken: opt.AccessToken}
|
||||
oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
if err != nil {
|
||||
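The new access_token option lets the gcs backend run with a short-lived token minted elsewhere (for example by gcloud) instead of the interactive OAuth flow. A hypothetical config and listing command, with the token value made up:

    [gcs]
    type = google cloud storage
    access_token = ya29.EXAMPLE-SHORT-LIVED-TOKEN

    rclone lsd gcs:my-bucket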
@@ -944,7 +955,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
return e
|
||||
}
|
||||
return f.createDirectoryMarker(ctx, bucket, dir)
|
||||
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
|
||||
@@ -28,13 +28,11 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
@@ -61,13 +59,14 @@ const (
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"openid",
|
||||
"profile",
|
||||
scopeReadWrite, // this must be at position scopeAccess
|
||||
},
|
||||
Endpoint: google.Endpoint,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
@@ -160,6 +159,34 @@ listings and transferred.
|
||||
Without this flag, archived media will not be visible in directory
|
||||
listings and won't be transferred.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "proxy",
|
||||
Default: "",
|
||||
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
|
||||
|
||||
The Google API will deliver images and video which aren't full
|
||||
resolution, and/or have EXIF data missing.
|
||||
|
||||
However, if you use the gphotosdl proxy then you can download original,
|
||||
unchanged images.
|
||||
|
||||
This runs a headless browser in the background.
|
||||
|
||||
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
|
||||
|
||||
First run with
|
||||
|
||||
gphotosdl -login
|
||||
|
||||
Then once you have logged into google photos close the browser window
|
||||
and run
|
||||
|
||||
gphotosdl
|
||||
|
||||
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
|
||||
rclone use the proxy.
|
||||
`, "|", "`"),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -181,6 +208,7 @@ type Options struct {
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
Proxy string `config:"proxy"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
defer log.Trace(f, "remote=%q", remote)("")
|
||||
// defer log.Trace(f, "remote=%q", remote)("")
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
@@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil || pattern.isFile {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
@@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
defer log.Trace(f, "src=%+v", src)("")
|
||||
// defer log.Trace(f, "src=%+v", src)("")
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
@@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
|
||||
|
||||
// Mkdir creates the album if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil {
|
||||
return fs.ErrorDirNotFound
|
||||
@@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
defer log.Trace(f, "dir=%q")("err=%v", &err)
|
||||
// defer log.Trace(f, "dir=%q")("err=%v", &err)
|
||||
match, _, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil {
|
||||
return fs.ErrorDirNotFound
|
||||
@@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
defer log.Trace(o, "")("")
|
||||
// defer log.Trace(o, "")("")
|
||||
if !o.fs.opt.ReadSize || o.bytes >= 0 {
|
||||
return o.bytes
|
||||
}
|
||||
@@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
defer log.Trace(o, "")("")
|
||||
// defer log.Trace(o, "")("")
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
|
||||
@@ -965,16 +993,20 @@ func (o *Object) downloadURL() string {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
defer log.Trace(o, "")("")
|
||||
// defer log.Trace(o, "")("")
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
url := o.downloadURL()
|
||||
if o.fs.opt.Proxy != "" {
|
||||
url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
|
||||
}
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.downloadURL(),
|
||||
RootURL: url,
|
||||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
|
||||
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
|
||||
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
|
||||
if pattern == nil || !pattern.isFile || !pattern.canUpload {
|
||||
return errCantUpload
|
||||
@@ -1136,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
errors := make([]error, 1)
|
||||
results := make([]*api.MediaItem, 1)
|
||||
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
|
||||
if err != nil {
|
||||
if err == nil {
|
||||
err = errors[0]
|
||||
info = results[0]
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package googlephotos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
|
||||
*fstest.RemoteName = "TestGooglePhotos:"
|
||||
}
|
||||
f, err := fs.NewFs(ctx, *fstest.RemoteName)
|
||||
if err == fs.ErrorNotFoundInConfigFile {
|
||||
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
|
||||
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -31,7 +31,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
@@ -48,11 +47,9 @@ const (
// Globals
var (
// Description of how to auth for this app.
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
},
oauthConfig = &oauthutil.Config{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
|
||||
@@ -331,12 +331,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Joins the remote onto the base URL
func (f *Fs) url(remote string) string {
trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + remote
return f.endpointURL + trimmedRemote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(remote)
return f.endpointURL + rest.URLPathEscape(trimmedRemote)
}
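// exampleURL is an illustrative sketch (not part of the patch; the endpoint
// URL and remote name are made up). f.endpointURL always ends in "/", so
// trimming the leading "/" from the remote avoids producing a double slash,
// while rest.URLPathEscape still escapes the remaining path segments.
func exampleURL() string {
	trimmed := strings.TrimLeft("/four/under four.txt", "/") // "four/under four.txt"
	return "http://example.com/base/" + rest.URLPathEscape(trimmed)
	// -> "http://example.com/base/four/under%20four.txt" rather than ".../base//four/..."
}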
|
||||
|
||||
// Errors returned by parseName
|
||||
|
||||
@@ -191,6 +191,33 @@ func TestNewObject(t *testing.T) {
|
||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
}
|
||||
|
||||
func TestNewObjectWithLeadingSlash(t *testing.T) {
|
||||
f := prepare(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "/four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "/four/under four.txt", o.Remote())
|
||||
assert.Equal(t, int64(8+lineEndSize), o.Size())
|
||||
_, ok := o.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
// Test the time is correct on the object
|
||||
|
||||
tObj := o.ModTime(context.Background())
|
||||
|
||||
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
||||
require.NoError(t, err)
|
||||
tFile := fi.ModTime()
|
||||
|
||||
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
|
||||
|
||||
// check object not found
|
||||
o, err = f.NewObject(context.Background(), "/not found.txt")
|
||||
assert.Nil(t, o)
|
||||
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
m := prepareServer(t)
166 backend/iclouddrive/api/client.go (new file)
@@ -0,0 +1,166 @@
|
||||
// Package api provides functionality for interacting with the iCloud API.
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
baseEndpoint = "https://www.icloud.com"
|
||||
homeEndpoint = "https://www.icloud.com"
|
||||
setupEndpoint = "https://setup.icloud.com/setup/ws/1"
|
||||
authEndpoint = "https://idmsa.apple.com/appleauth/auth"
|
||||
)
|
||||
|
||||
type sessionSave func(*Session)
|
||||
|
||||
// Client defines the client configuration
|
||||
type Client struct {
|
||||
appleID string
|
||||
password string
|
||||
srv *rest.Client
|
||||
Session *Session
|
||||
sessionSaveCallback sessionSave
|
||||
|
||||
drive *DriveService
|
||||
}
|
||||
|
||||
// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
|
||||
//
|
||||
// Parameters:
|
||||
// - appleID: the Apple ID of the user.
|
||||
// - password: the password of the user.
|
||||
// - trustToken: the trust token for the session.
|
||||
// - clientID: the client id for the session.
|
||||
// - cookies: the cookies for the session.
|
||||
// - sessionSaveCallback: the callback function to save the session.
|
||||
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
|
||||
icloud := &Client{
|
||||
appleID: appleID,
|
||||
password: password,
|
||||
srv: rest.NewClient(fshttp.NewClient(context.Background())),
|
||||
Session: NewSession(),
|
||||
sessionSaveCallback: sessionSaveCallback,
|
||||
}
|
||||
|
||||
icloud.Session.TrustToken = trustToken
|
||||
icloud.Session.Cookies = cookies
|
||||
icloud.Session.ClientID = clientID
|
||||
return icloud, nil
|
||||
}
|
||||
|
||||
// DriveService returns the DriveService instance associated with the Client.
|
||||
func (c *Client) DriveService() (*DriveService, error) {
|
||||
var err error
|
||||
if c.drive == nil {
|
||||
c.drive, err = NewDriveService(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return c.drive, nil
|
||||
}
|
||||
|
||||
// Request makes a request and retries it if the session is invalid.
|
||||
//
|
||||
// This function is the main entry point for making requests to the iCloud
|
||||
// API. If the initial request returns a 401 (Unauthorized), it will try to
|
||||
// reauthenticate and retry the request.
|
||||
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||
resp, err = c.Session.Request(ctx, opts, request, response)
|
||||
if err != nil && resp != nil {
|
||||
// try to reauth
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 421 {
|
||||
err = c.Authenticate(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c.Session.Requires2FA() {
|
||||
return nil, errors.New("trust token expired, please reauth")
|
||||
}
|
||||
return c.RequestNoReAuth(ctx, opts, request, response)
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
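// exampleRequest is an illustrative sketch (not part of the patch; the path
// and response type are hypothetical). Callers build a rest.Opts and let
// Request handle the re-authentication on 401/421 described above.
func exampleRequest(ctx context.Context, c *Client) error {
	var out map[string]any
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/some/endpoint", // hypothetical path
		RootURL:      setupEndpoint,
		ExtraHeaders: c.Session.GetHeaders(map[string]string{}),
	}
	_, err := c.Request(ctx, opts, nil, &out)
	return err
}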
|
||||
|
||||
// RequestNoReAuth makes a request without re-authenticating.
|
||||
//
|
||||
// This function is useful when you have a session that is already
|
||||
// authenticated, but you need to make a request without triggering
|
||||
// a re-authentication.
|
||||
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||
// Make the request without re-authenticating
|
||||
resp, err = c.Session.Request(ctx, opts, request, response)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Authenticate authenticates the client with the iCloud API.
|
||||
func (c *Client) Authenticate(ctx context.Context) error {
|
||||
if c.Session.Cookies != nil {
|
||||
if err := c.Session.ValidateSession(ctx); err == nil {
|
||||
fs.Debugf("icloud", "Valid session, no need to reauth")
|
||||
return nil
|
||||
}
|
||||
c.Session.Cookies = nil
|
||||
}
|
||||
|
||||
fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
|
||||
err := c.Session.SignIn(ctx, c.appleID, c.password)
|
||||
|
||||
if err == nil {
|
||||
err = c.Session.AuthWithToken(ctx)
|
||||
if err == nil && c.sessionSaveCallback != nil {
|
||||
c.sessionSaveCallback(c.Session)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SignIn signs in the client using the provided context and credentials.
|
||||
func (c *Client) SignIn(ctx context.Context) error {
|
||||
return c.Session.SignIn(ctx, c.appleID, c.password)
|
||||
}
|
||||
|
||||
// IntoReader marshals the provided values into a JSON encoded reader
|
||||
func IntoReader(values any) (*bytes.Reader, error) {
|
||||
m, err := json.Marshal(values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.NewReader(m), nil
|
||||
}
|
||||
|
||||
// RequestError holds info on a result state; iCloud can return a 200 but the result is unknown
|
||||
type RequestError struct {
|
||||
Status string
|
||||
Text string
|
||||
}
|
||||
|
||||
// Error satisfies the error interface.
|
||||
func (e *RequestError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Text, e.Status)
|
||||
}
|
||||
|
||||
func newRequestError(Status string, Text string) *RequestError {
|
||||
return &RequestError{
|
||||
Status: strings.ToLower(Status),
|
||||
Text: Text,
|
||||
}
|
||||
}
|
||||
|
||||
// newRequestErrorf makes a new error from sprintf parameters.
|
||||
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
|
||||
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
|
||||
}
913 backend/iclouddrive/api/drive.go (new file)
@@ -0,0 +1,913 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultZone = "com.apple.CloudDocs"
|
||||
statusOk = "OK"
|
||||
statusEtagConflict = "ETAG_CONFLICT"
|
||||
)
|
||||
|
||||
// DriveService represents an iCloud Drive service.
|
||||
type DriveService struct {
|
||||
icloud *Client
|
||||
RootID string
|
||||
endpoint string
|
||||
docsEndpoint string
|
||||
}
|
||||
|
||||
// NewDriveService creates a new DriveService instance.
|
||||
func NewDriveService(icloud *Client) (*DriveService, error) {
|
||||
return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
|
||||
}
|
||||
|
||||
// GetItemByDriveID retrieves a DriveItem by its Drive ID.
|
||||
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
|
||||
items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return items[0], resp, err
|
||||
}
|
||||
|
||||
// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
|
||||
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
|
||||
var err error
|
||||
_items := []map[string]any{}
|
||||
for _, id := range ids {
|
||||
_items = append(_items, map[string]any{
|
||||
"drivewsid": id,
|
||||
"partialData": false,
|
||||
"includeHierarchy": false,
|
||||
})
|
||||
}
|
||||
|
||||
var body *bytes.Reader
|
||||
var path string
|
||||
if !includeChildren {
|
||||
values := []map[string]any{{
|
||||
"items": _items,
|
||||
}}
|
||||
body, err = IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
path = "/retrieveItemDetails"
|
||||
} else {
|
||||
values := _items
|
||||
body, err = IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
path = "/retrieveItemDetailsInFolders"
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: path,
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.endpoint,
|
||||
Body: body,
|
||||
}
|
||||
var items []*DriveItem
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return items, resp, err
|
||||
}
|
||||
|
||||
// GetDocByPath retrieves a document by its path.
|
||||
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
|
||||
values := url.Values{}
|
||||
values.Set("unified_format", "false")
|
||||
body, err := IntoReader(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Parameters: values,
|
||||
Body: body,
|
||||
}
|
||||
var item []*Document
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return item[0], resp, err
|
||||
}
|
||||
|
||||
// GetItemByPath retrieves a DriveItem by its path.
|
||||
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
|
||||
values := url.Values{}
|
||||
values.Set("unified_format", "true")
|
||||
|
||||
body, err := IntoReader(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Parameters: values,
|
||||
Body: body,
|
||||
}
|
||||
var item []*DriveItem
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return item[0], resp, err
|
||||
}
|
||||
|
||||
// GetDocByItemID retrieves a document by its item ID.
|
||||
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
|
||||
values := url.Values{}
|
||||
values.Set("document_id", id)
|
||||
values.Set("unified_format", "false") // important
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/ws/" + defaultZone + "/list/lookup_by_id",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Parameters: values,
|
||||
}
|
||||
var item *Document
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return item, resp, err
|
||||
}
|
||||
|
||||
// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
|
||||
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/v1/item/" + id,
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
}
|
||||
var item *DriveItemRaw
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return item, resp, err
|
||||
}
|
||||
|
||||
// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
|
||||
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
|
||||
values := url.Values{}
|
||||
values.Set("limit", strconv.FormatInt(limit, 10))
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/v1/enumerate/" + id,
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Parameters: values,
|
||||
}
|
||||
|
||||
items := struct {
|
||||
Items []*DriveItemRaw `json:"drive_item"`
|
||||
}{}
|
||||
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return items.Items, resp, err
|
||||
}
|
||||
|
||||
// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
|
||||
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
|
||||
_, zone, docid := DeconstructDriveID(id)
|
||||
values := url.Values{}
|
||||
values.Set("document_id", docid)
|
||||
|
||||
if zone == "" {
|
||||
zone = defaultZone
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/ws/" + zone + "/download/by_id",
|
||||
Parameters: values,
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
}
|
||||
|
||||
var filer *FileRequest
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &filer)
|
||||
|
||||
if err != nil {
|
||||
return "", resp, err
|
||||
}
|
||||
|
||||
var url string
|
||||
if filer.DataToken != nil {
|
||||
url = filer.DataToken.URL
|
||||
} else {
|
||||
url = filer.PackageToken.URL
|
||||
}
|
||||
|
||||
return url, resp, err
|
||||
}
|
||||
|
||||
// DownloadFile downloads a file from the given URL using the provided options.
|
||||
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: url,
|
||||
Options: opt,
|
||||
}
|
||||
|
||||
resp, err := d.icloud.srv.Call(ctx, opts)
|
||||
if err != nil {
|
||||
// icloud has some weird http codes
|
||||
if resp.StatusCode == 330 {
|
||||
loc, err := resp.Location()
|
||||
if err == nil {
|
||||
return d.DownloadFile(ctx, loc.String(), opt)
|
||||
}
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
return d.icloud.srv.Call(ctx, opts)
|
||||
}
|
||||
|
||||
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
|
||||
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
|
||||
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
|
||||
}
|
||||
|
||||
// MoveItemToTrashByID moves an item to the trash based on the item ID.
|
||||
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
|
||||
values := map[string]any{
|
||||
"items": []map[string]any{{
|
||||
"drivewsid": drivewsid,
|
||||
"etag": etag,
|
||||
"clientId": drivewsid,
|
||||
}}}
|
||||
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/moveItemsToTrash",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.endpoint,
|
||||
Body: body,
|
||||
}
|
||||
|
||||
item := struct {
|
||||
Items []*DriveItem `json:"items"`
|
||||
}{}
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &item)
|
||||
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
if item.Items[0].Status != statusOk {
|
||||
// rerun with latest etag
|
||||
if force && item.Items[0].Status == "ETAG_CONFLICT" {
|
||||
return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
|
||||
}
|
||||
|
||||
err = newRequestError(item.Items[0].Status, "unknown request status")
|
||||
}
|
||||
|
||||
return item.Items[0], resp, err
|
||||
}
|
||||
|
||||
// CreateNewFolderByItemID creates a new folder by item ID.
|
||||
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
|
||||
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
|
||||
}
|
||||
|
||||
// CreateNewFolderByDriveID creates a new folder by its Drive ID.
|
||||
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
|
||||
values := map[string]any{
|
||||
"destinationDrivewsId": drivewsid,
|
||||
"folders": []map[string]any{{
|
||||
"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
|
||||
"name": name,
|
||||
}},
|
||||
}
|
||||
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/createFolders",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.endpoint,
|
||||
Body: body,
|
||||
}
|
||||
var fResp *CreateFoldersResponse
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
status := fResp.Folders[0].Status
|
||||
if status != statusOk {
|
||||
err = newRequestError(status, "unknown request status")
|
||||
}
|
||||
|
||||
return fResp.Folders[0], resp, err
|
||||
}
|
||||
|
||||
// RenameItemByItemID renames a DriveItem by its item ID.
|
||||
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
|
||||
doc, resp, err := d.GetDocByItemID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
|
||||
}
|
||||
|
||||
// RenameItemByDriveID renames a DriveItem by its drive ID.
|
||||
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
|
||||
values := map[string]any{
|
||||
"items": []map[string]any{{
|
||||
"drivewsid": id,
|
||||
"name": name,
|
||||
"etag": etag,
|
||||
// "extension": split[1],
|
||||
}},
|
||||
}
|
||||
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/renameItems",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.endpoint,
|
||||
Body: body,
|
||||
}
|
||||
var items *DriveItem
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
status := items.Items[0].Status
|
||||
if status != statusOk {
|
||||
// rerun with latest etag
|
||||
if force && status == "ETAG_CONFLICT" {
|
||||
return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
|
||||
}
|
||||
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
|
||||
}
|
||||
|
||||
return items.Items[0], resp, err
|
||||
}
|
||||
|
||||
// MoveItemByItemID moves an item by its item ID to a destination item ID.
|
||||
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
|
||||
docSrc, resp, err := d.GetDocByItemID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
docDst, resp, err := d.GetDocByItemID(ctx, dstID)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
|
||||
}
|
||||
|
||||
// MoveItemByDocID moves an item by its doc ID.
|
||||
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
|
||||
// return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
|
||||
// }
|
||||
|
||||
// MoveItemByDriveID moves an item by its drive ID.
|
||||
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
|
||||
values := map[string]any{
|
||||
"destinationDrivewsId": dstID,
|
||||
"items": []map[string]any{{
|
||||
"drivewsid": id,
|
||||
"etag": etag,
|
||||
"clientId": id,
|
||||
}},
|
||||
}
|
||||
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/moveItems",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.endpoint,
|
||||
Body: body,
|
||||
}
|
||||
|
||||
var items *DriveItem
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &items)
|
||||
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
status := items.Items[0].Status
|
||||
if status != statusOk {
|
||||
// rerun with latest etag
|
||||
if force && status == "ETAG_CONFLICT" {
|
||||
return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
|
||||
}
|
||||
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
|
||||
}
|
||||
|
||||
return items.Items[0], resp, err
|
||||
}
|
||||
|
||||
// CopyDocByItemID copies a document by its item ID.
|
||||
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
|
||||
// putting the name in info doesn't work; the extension does work, so assume this is a bug in the endpoint
|
||||
values := map[string]any{
|
||||
"info_to_update": map[string]any{},
|
||||
}
|
||||
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/v1/item/copy/" + itemID,
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Body: body,
|
||||
}
|
||||
|
||||
var info *DriveItemRaw
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return info, resp, err
|
||||
}
|
||||
|
||||
// CreateUpload creates a URL for an upload.
|
||||
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
|
||||
// first we need to request an upload url
|
||||
values := map[string]any{
|
||||
"filename": name,
|
||||
"type": "FILE",
|
||||
"size": strconv.FormatInt(size, 10),
|
||||
"content_type": GetContentTypeForFile(name),
|
||||
}
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/ws/" + defaultZone + "/upload/web",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Body: body,
|
||||
}
|
||||
var responseInfo []*UploadResponse
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return responseInfo[0], resp, err
|
||||
}
|
||||
|
||||
// Upload uploads a file to the given URL
|
||||
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
|
||||
// TODO: implement multipart upload
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: uploadURL,
|
||||
Body: in,
|
||||
ContentLength: &size,
|
||||
ContentType: GetContentTypeForFile(name),
|
||||
// MultipartContentName: "files",
|
||||
MultipartFileName: name,
|
||||
}
|
||||
var singleFileResponse *SingleFileResponse
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return singleFileResponse, resp, err
|
||||
}
|
||||
|
||||
// UpdateFile updates a file in the DriveService.
|
||||
//
|
||||
// ctx: the context.Context object for the request.
|
||||
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
|
||||
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
|
||||
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
|
||||
body, err := IntoReader(r)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/ws/" + defaultZone + "/update/documents",
|
||||
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
|
||||
RootURL: d.docsEndpoint,
|
||||
Body: body,
|
||||
}
|
||||
var responseInfo *DocumentUpdateResponse
|
||||
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
doc := responseInfo.Results[0].Document
|
||||
item := DriveItem{
|
||||
Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID,
|
||||
Docwsid: doc.DocumentID,
|
||||
Itemid: doc.ItemID,
|
||||
Etag: doc.Etag,
|
||||
ParentID: doc.ParentID,
|
||||
DateModified: time.Unix(r.Mtime, 0),
|
||||
DateCreated: time.Unix(r.Mtime, 0),
|
||||
Type: doc.Type,
|
||||
Name: doc.Name,
|
||||
Size: doc.Size,
|
||||
}
|
||||
|
||||
return &item, resp, err
|
||||
}
|
||||
|
||||
// UpdateFileInfo represents the information for an update to a file in the DriveService.
|
||||
type UpdateFileInfo struct {
|
||||
AllowConflict bool `json:"allow_conflict"`
|
||||
Btime int64 `json:"btime"`
|
||||
Command string `json:"command"`
|
||||
CreateShortGUID bool `json:"create_short_guid"`
|
||||
Data struct {
|
||||
Receipt string `json:"receipt,omitempty"`
|
||||
ReferenceSignature string `json:"reference_signature,omitempty"`
|
||||
Signature string `json:"signature,omitempty"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
WrappingKey string `json:"wrapping_key,omitempty"`
|
||||
} `json:"data,omitempty"`
|
||||
DocumentID string `json:"document_id"`
|
||||
FileFlags FileFlags `json:"file_flags"`
|
||||
Mtime int64 `json:"mtime"`
|
||||
Path struct {
|
||||
Path string `json:"path"`
|
||||
StartingDocumentID string `json:"starting_document_id"`
|
||||
} `json:"path"`
|
||||
}
|
||||
|
||||
// FileFlags defines the file flags for a document.
|
||||
type FileFlags struct {
|
||||
IsExecutable bool `json:"is_executable"`
|
||||
IsHidden bool `json:"is_hidden"`
|
||||
IsWritable bool `json:"is_writable"`
|
||||
}
|
||||
|
||||
// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
|
||||
//
|
||||
// Returns an UpdateFileInfo object.
|
||||
func NewUpdateFileInfo() UpdateFileInfo {
|
||||
return UpdateFileInfo{
|
||||
Command: "add_file",
|
||||
CreateShortGUID: true,
|
||||
AllowConflict: true,
|
||||
FileFlags: FileFlags{
|
||||
IsExecutable: true,
|
||||
IsHidden: false,
|
||||
IsWritable: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// DriveItemRaw is a raw drive item.
|
||||
// not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
|
||||
type DriveItemRaw struct {
|
||||
ItemID string `json:"item_id"`
|
||||
ItemInfo *DriveItemRawInfo `json:"item_info"`
|
||||
}
|
||||
|
||||
// SplitName splits the name of a DriveItemRaw into its name and extension.
|
||||
//
|
||||
// It returns the name and extension as separate strings. If the name ends with a dot,
|
||||
// it means there is no extension, so an empty string is returned for the extension.
|
||||
// If the name does not contain a dot, there is also no extension, so the full name and an empty string are returned.
|
||||
func (d *DriveItemRaw) SplitName() (string, string) {
|
||||
name := d.ItemInfo.Name
|
||||
// ends with a dot, no extension
|
||||
if strings.HasSuffix(name, ".") {
|
||||
return name, ""
|
||||
}
|
||||
lastInd := strings.LastIndex(name, ".")
|
||||
|
||||
if lastInd == -1 {
|
||||
return name, ""
|
||||
}
|
||||
return name[:lastInd], name[lastInd+1:]
|
||||
}
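// exampleSplitName is an illustrative sketch (not part of the patch; the
// names are made up) of how SplitName separates a name from its extension.
func exampleSplitName() {
	item := &DriveItemRaw{ItemInfo: &DriveItemRawInfo{Name: "report.pdf"}}
	name, ext := item.SplitName() // name == "report", ext == "pdf"

	item.ItemInfo.Name = "README" // no dot, so no extension
	name, ext = item.SplitName()  // name == "README", ext == ""
	_, _ = name, ext
}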
|
||||
|
||||
// ModTime returns the modification time of the DriveItemRaw.
|
||||
//
|
||||
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||
// If the parsing fails, it returns the zero value of time.Time.
|
||||
// The returned time.Time value represents the modification time of the DriveItemRaw.
|
||||
func (d *DriveItemRaw) ModTime() time.Time {
|
||||
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.UnixMilli(i)
|
||||
}
|
||||
|
||||
// CreatedTime returns the creation time of the DriveItemRaw.
|
||||
//
|
||||
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
|
||||
// If the parsing fails, it returns the zero value of time.Time.
|
||||
// The returned time.Time value represents the creation time of the DriveItemRaw.
|
||||
func (d *DriveItemRaw) CreatedTime() time.Time {
|
||||
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.UnixMilli(i)
|
||||
}
|
||||
|
||||
// DriveItemRawInfo is the raw information about a drive item.
|
||||
type DriveItemRawInfo struct {
|
||||
Name string `json:"name"`
|
||||
// Extension is absolutely borked on endpoints so don't use it.
|
||||
Extension string `json:"extension"`
|
||||
Size int64 `json:"size,string"`
|
||||
Type string `json:"type"`
|
||||
Version string `json:"version"`
|
||||
ModifiedAt string `json:"modified_at"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
Urls struct {
|
||||
URLDownload string `json:"url_download"`
|
||||
} `json:"urls"`
|
||||
}
|
||||
|
||||
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
|
||||
//
|
||||
// It takes no parameters.
|
||||
// It returns a pointer to a DriveItem.
|
||||
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
|
||||
name, extension := d.SplitName()
|
||||
return &DriveItem{
|
||||
Itemid: d.ItemID,
|
||||
Name: name,
|
||||
Extension: extension,
|
||||
Type: d.ItemInfo.Type,
|
||||
Etag: d.ItemInfo.Version,
|
||||
DateModified: d.ModTime(),
|
||||
DateCreated: d.CreatedTime(),
|
||||
Size: d.ItemInfo.Size,
|
||||
Urls: d.ItemInfo.Urls,
|
||||
}
|
||||
}
|
||||
|
||||
// DocumentUpdateResponse is the response of a document update request.
|
||||
type DocumentUpdateResponse struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
Results []struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
OperationID interface{} `json:"operation_id"`
|
||||
Document *Document `json:"document"`
|
||||
} `json:"results"`
|
||||
}
|
||||
|
||||
// Document represents a document on iCloud.
|
||||
type Document struct {
|
||||
Status struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
ErrorMessage string `json:"error_message"`
|
||||
} `json:"status"`
|
||||
DocumentID string `json:"document_id"`
|
||||
ItemID string `json:"item_id"`
|
||||
Urls struct {
|
||||
URLDownload string `json:"url_download"`
|
||||
} `json:"urls"`
|
||||
Etag string `json:"etag"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Deleted bool `json:"deleted"`
|
||||
Mtime int64 `json:"mtime"`
|
||||
LastEditorName string `json:"last_editor_name"`
|
||||
Data DocumentData `json:"data"`
|
||||
Size int64 `json:"size"`
|
||||
Btime int64 `json:"btime"`
|
||||
Zone string `json:"zone"`
|
||||
FileFlags struct {
|
||||
IsExecutable bool `json:"is_executable"`
|
||||
IsWritable bool `json:"is_writable"`
|
||||
IsHidden bool `json:"is_hidden"`
|
||||
} `json:"file_flags"`
|
||||
LastOpenedTime int64 `json:"lastOpenedTime"`
|
||||
RestorePath interface{} `json:"restorePath"`
|
||||
HasChainedParent bool `json:"hasChainedParent"`
|
||||
}
|
||||
|
||||
// DriveID returns the drive ID of the Document.
|
||||
func (d *Document) DriveID() string {
|
||||
if d.Zone == "" {
|
||||
d.Zone = defaultZone
|
||||
}
|
||||
return d.Type + "::" + d.Zone + "::" + d.DocumentID
|
||||
}
|
||||
|
||||
// DocumentData represents the data of a document.
|
||||
type DocumentData struct {
|
||||
Signature string `json:"signature"`
|
||||
Owner string `json:"owner"`
|
||||
Size int64 `json:"size"`
|
||||
ReferenceSignature string `json:"reference_signature"`
|
||||
WrappingKey string `json:"wrapping_key"`
|
||||
PcsInfo string `json:"pcsInfo"`
|
||||
}
|
||||
|
||||
// SingleFileResponse is the response of a single file request.
|
||||
type SingleFileResponse struct {
|
||||
SingleFile *SingleFileInfo `json:"singleFile"`
|
||||
}
|
||||
|
||||
// SingleFileInfo represents the information of a single file.
|
||||
type SingleFileInfo struct {
|
||||
ReferenceSignature string `json:"referenceChecksum"`
|
||||
Size int64 `json:"size"`
|
||||
Signature string `json:"fileChecksum"`
|
||||
WrappingKey string `json:"wrappingKey"`
|
||||
Receipt string `json:"receipt"`
|
||||
}
|
||||
|
||||
// UploadResponse is the response of an upload request.
|
||||
type UploadResponse struct {
|
||||
URL string `json:"url"`
|
||||
DocumentID string `json:"document_id"`
|
||||
}
|
||||
|
||||
// FileRequestToken represents the token of a file request.
|
||||
type FileRequestToken struct {
|
||||
URL string `json:"url"`
|
||||
Token string `json:"token"`
|
||||
Signature string `json:"signature"`
|
||||
WrappingKey string `json:"wrapping_key"`
|
||||
ReferenceSignature string `json:"reference_signature"`
|
||||
}
|
||||
|
||||
// FileRequest represents the request of a file.
|
||||
type FileRequest struct {
|
||||
DocumentID string `json:"document_id"`
|
||||
ItemID string `json:"item_id"`
|
||||
OwnerDsid int64 `json:"owner_dsid"`
|
||||
DataToken *FileRequestToken `json:"data_token,omitempty"`
|
||||
PackageToken *FileRequestToken `json:"package_token,omitempty"`
|
||||
DoubleEtag string `json:"double_etag"`
|
||||
}
|
||||
|
||||
// CreateFoldersResponse is the response of a create folders request.
|
||||
type CreateFoldersResponse struct {
|
||||
Folders []*DriveItem `json:"folders"`
|
||||
}
|
||||
|
||||
// DriveItem represents an item on iCloud.
|
||||
type DriveItem struct {
|
||||
DateCreated time.Time `json:"dateCreated"`
|
||||
Drivewsid string `json:"drivewsid"`
|
||||
Docwsid string `json:"docwsid"`
|
||||
Itemid string `json:"item_id"`
|
||||
Zone string `json:"zone"`
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parentId"`
|
||||
Hierarchy []DriveItem `json:"hierarchy"`
|
||||
Etag string `json:"etag"`
|
||||
Type string `json:"type"`
|
||||
AssetQuota int64 `json:"assetQuota"`
|
||||
FileCount int64 `json:"fileCount"`
|
||||
ShareCount int64 `json:"shareCount"`
|
||||
ShareAliasCount int64 `json:"shareAliasCount"`
|
||||
DirectChildrenCount int64 `json:"directChildrenCount"`
|
||||
Items []*DriveItem `json:"items"`
|
||||
NumberOfItems int64 `json:"numberOfItems"`
|
||||
Status string `json:"status"`
|
||||
Extension string `json:"extension,omitempty"`
|
||||
DateModified time.Time `json:"dateModified,omitempty"`
|
||||
DateChanged time.Time `json:"dateChanged,omitempty"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
LastOpenTime time.Time `json:"lastOpenTime,omitempty"`
|
||||
Urls struct {
|
||||
URLDownload string `json:"url_download"`
|
||||
} `json:"urls"`
|
||||
}
|
||||
|
||||
// IsFolder returns true if the item is a folder.
|
||||
func (d *DriveItem) IsFolder() bool {
|
||||
return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
|
||||
}
|
||||
|
||||
// DownloadURL returns the download URL of the item.
|
||||
func (d *DriveItem) DownloadURL() string {
|
||||
return d.Urls.URLDownload
|
||||
}
|
||||
|
||||
// FullName returns the full name of the item.
|
||||
// name + extension
|
||||
func (d *DriveItem) FullName() string {
|
||||
if d.Extension != "" {
|
||||
return d.Name + "." + d.Extension
|
||||
}
|
||||
return d.Name
|
||||
}
|
||||
|
||||
// GetDocIDFromDriveID returns the DocumentID from the drive ID.
|
||||
func GetDocIDFromDriveID(id string) string {
|
||||
split := strings.Split(id, "::")
|
||||
return split[len(split)-1]
|
||||
}
|
||||
|
||||
// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
|
||||
func DeconstructDriveID(id string) (docType, zone, docid string) {
|
||||
split := strings.Split(id, "::")
|
||||
if len(split) < 3 {
|
||||
return "", "", id
|
||||
}
|
||||
return split[0], split[1], split[2]
|
||||
}
|
||||
|
||||
// ConstructDriveID constructs a drive ID from the given components.
|
||||
func ConstructDriveID(id string, zone string, t string) string {
|
||||
return strings.Join([]string{t, zone, id}, "::")
|
||||
}
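// exampleDriveID is an illustrative sketch (not part of the patch) that
// round-trips the root drivewsid used by NewDriveService through
// DeconstructDriveID and ConstructDriveID.
func exampleDriveID() {
	docType, zone, docid := DeconstructDriveID("FOLDER::com.apple.CloudDocs::root")
	// docType == "FOLDER", zone == "com.apple.CloudDocs", docid == "root"
	_ = ConstructDriveID(docid, zone, docType) // "FOLDER::com.apple.CloudDocs::root"
}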
|
||||
|
||||
// GetContentTypeForFile detects content type for given file name.
|
||||
func GetContentTypeForFile(name string) string {
|
||||
// detect MIME type by looking at the filename only
|
||||
mimeType := mime.TypeByExtension(filepath.Ext(name))
|
||||
if mimeType == "" {
|
||||
// api requires a mime type passed in
|
||||
mimeType = "text/plain"
|
||||
}
|
||||
return strings.Split(mimeType, ";")[0]
|
||||
}
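// exampleContentType is an illustrative sketch (not part of the patch; the
// file names are made up). The MIME type is derived from the extension
// alone and typically falls back to "text/plain" for unknown extensions,
// since the API requires some content type.
func exampleContentType() {
	_ = GetContentTypeForFile("photo.jpg")    // usually "image/jpeg"
	_ = GetContentTypeForFile("notes.nosuch") // "text/plain" fallback
}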
412 backend/iclouddrive/api/session.go (new file)
@@ -0,0 +1,412 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Session represents an iCloud session
|
||||
type Session struct {
|
||||
SessionToken string `json:"session_token"`
|
||||
Scnt string `json:"scnt"`
|
||||
SessionID string `json:"session_id"`
|
||||
AccountCountry string `json:"account_country"`
|
||||
TrustToken string `json:"trust_token"`
|
||||
ClientID string `json:"client_id"`
|
||||
Cookies []*http.Cookie `json:"cookies"`
|
||||
AccountInfo AccountInfo `json:"account_info"`
|
||||
|
||||
srv *rest.Client `json:"-"`
|
||||
}
|
||||
|
||||
// String returns the session as a string
|
||||
// func (s *Session) String() string {
|
||||
// jsession, _ := json.Marshal(s)
|
||||
// return string(jsession)
|
||||
// }
|
||||
|
||||
// Request makes a request
|
||||
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
|
||||
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
|
||||
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
|
||||
s.AccountCountry = val
|
||||
}
|
||||
if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
|
||||
s.SessionID = val
|
||||
}
|
||||
if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
|
||||
s.SessionToken = val
|
||||
}
|
||||
if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
|
||||
s.TrustToken = val
|
||||
}
|
||||
if val := resp.Header.Get("scnt"); val != "" {
|
||||
s.Scnt = val
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Requires2FA returns true if the session requires 2FA
|
||||
func (s *Session) Requires2FA() bool {
|
||||
return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
|
||||
}
|
||||
|
||||
// SignIn signs in the session
|
||||
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
|
||||
trustTokens := []string{}
|
||||
if s.TrustToken != "" {
|
||||
trustTokens = []string{s.TrustToken}
|
||||
}
|
||||
values := map[string]any{
|
||||
"accountName": appleID,
|
||||
"password": password,
|
||||
"rememberMe": true,
|
||||
"trustTokens": trustTokens,
|
||||
}
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/signin",
|
||||
Parameters: url.Values{},
|
||||
ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
|
||||
RootURL: authEndpoint,
|
||||
IgnoreStatus: true, // need to handle 409 for hsa2
|
||||
NoResponse: true,
|
||||
Body: body,
|
||||
}
|
||||
opts.Parameters.Set("isRememberMeEnabled", "true")
|
||||
_, err = s.Request(ctx, opts, nil, nil)
|
||||
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// AuthWithToken authenticates the session
|
||||
func (s *Session) AuthWithToken(ctx context.Context) error {
|
||||
values := map[string]any{
|
||||
"accountCountryCode": s.AccountCountry,
|
||||
"dsWebAuthToken": s.SessionToken,
|
||||
"extended_login": true,
|
||||
"trustToken": s.TrustToken,
|
||||
}
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/accountLogin",
|
||||
ExtraHeaders: GetCommonHeaders(map[string]string{}),
|
||||
RootURL: setupEndpoint,
|
||||
Body: body,
|
||||
}
|
||||
|
||||
resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
|
||||
if err == nil {
|
||||
s.Cookies = resp.Cookies()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate2FACode validates the 2FA code
|
||||
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
|
||||
values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
|
||||
body, err := IntoReader(values)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := s.GetAuthHeaders(map[string]string{})
|
||||
headers["scnt"] = s.Scnt
|
||||
headers["X-Apple-ID-Session-Id"] = s.SessionID
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/verify/trusteddevice/securitycode",
|
||||
ExtraHeaders: headers,
|
||||
RootURL: authEndpoint,
|
||||
Body: body,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
_, err = s.Request(ctx, opts, nil, nil)
|
||||
if err == nil {
|
||||
if err := s.TrustSession(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("validate2FACode failed: %w", err)
|
||||
}
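// example2FA is an illustrative sketch (not part of the patch; obtaining the
// security code from the user is left out) of the intended 2FA flow around
// SignIn, AuthWithToken, Requires2FA and Validate2FACode.
func example2FA(ctx context.Context, s *Session, appleID, password, code string) error {
	if err := s.SignIn(ctx, appleID, password); err != nil {
		return err
	}
	if err := s.AuthWithToken(ctx); err != nil {
		return err
	}
	if s.Requires2FA() {
		// Validate2FACode also trusts the session and re-authenticates.
		return s.Validate2FACode(ctx, code)
	}
	return nil
}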
|
||||
|
||||
// TrustSession trusts the session
|
||||
func (s *Session) TrustSession(ctx context.Context) error {
|
||||
headers := s.GetAuthHeaders(map[string]string{})
|
||||
headers["scnt"] = s.Scnt
|
||||
headers["X-Apple-ID-Session-Id"] = s.SessionID
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/2sv/trust",
|
||||
ExtraHeaders: headers,
|
||||
RootURL: authEndpoint,
|
||||
NoResponse: true,
|
||||
ContentLength: common.Int64(0),
|
||||
}
|
||||
|
||||
_, err := s.Request(ctx, opts, nil, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("trustSession failed: %w", err)
|
||||
}
|
||||
|
||||
return s.AuthWithToken(ctx)
|
||||
}
|
||||
|
||||
// ValidateSession validates the session
|
||||
func (s *Session) ValidateSession(ctx context.Context) error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/validate",
|
||||
ExtraHeaders: s.GetHeaders(map[string]string{}),
|
||||
RootURL: setupEndpoint,
|
||||
ContentLength: common.Int64(0),
|
||||
}
|
||||
_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("validateSession failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAuthHeaders returns the authentication headers for the session.
|
||||
//
|
||||
// It takes an `overwrite` map[string]string parameter which allows
|
||||
// overwriting the default headers. It returns a map[string]string.
|
||||
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
|
||||
headers := map[string]string{
|
||||
"Accept": "application/json",
|
||||
"Content-Type": "application/json",
|
||||
"X-Apple-OAuth-Client-Id": s.ClientID,
|
||||
"X-Apple-OAuth-Client-Type": "firstPartyAuth",
|
||||
"X-Apple-OAuth-Redirect-URI": "https://www.icloud.com",
|
||||
"X-Apple-OAuth-Require-Grant-Code": "true",
|
||||
"X-Apple-OAuth-Response-Mode": "web_message",
|
||||
"X-Apple-OAuth-Response-Type": "code",
|
||||
"X-Apple-OAuth-State": s.ClientID,
|
||||
"X-Apple-Widget-Key": s.ClientID,
|
||||
"Origin": homeEndpoint,
|
||||
"Referer": fmt.Sprintf("%s/", homeEndpoint),
|
||||
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||
}
|
||||
for k, v := range overwrite {
|
||||
headers[k] = v
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// GetHeaders Gets the authentication headers required for a request
|
||||
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
|
||||
headers := GetCommonHeaders(map[string]string{})
|
||||
headers["Cookie"] = s.GetCookieString()
|
||||
for k, v := range overwrite {
|
||||
headers[k] = v
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// GetCookieString returns the cookie header string for the session.
|
||||
func (s *Session) GetCookieString() string {
|
||||
cookieHeader := ""
|
||||
// we only care about name and value.
|
||||
for _, cookie := range s.Cookies {
|
||||
cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
|
||||
}
|
||||
return cookieHeader
|
||||
}
|
||||
|
||||
// GetCommonHeaders generates common HTTP headers with optional overwrite.
|
||||
func GetCommonHeaders(overwrite map[string]string) map[string]string {
|
||||
headers := map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
"Origin": baseEndpoint,
|
||||
"Referer": fmt.Sprintf("%s/", baseEndpoint),
|
||||
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||
}
|
||||
for k, v := range overwrite {
|
||||
headers[k] = v
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
|
||||
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
|
||||
var hashes []string
|
||||
for _, cookie := range right {
|
||||
hashes = append(hashes, cookie.Raw)
|
||||
}
|
||||
for _, cookie := range left {
|
||||
if !slices.Contains(hashes, cookie.Raw) {
|
||||
right = append(right, cookie)
|
||||
}
|
||||
}
|
||||
return right, nil
|
||||
}
|
||||
|
||||
// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
|
||||
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
|
||||
var domainCookies []*http.Cookie
|
||||
for _, cookie := range cookies {
|
||||
if strings.HasSuffix(url.Host, cookie.Domain) {
|
||||
domainCookies = append(domainCookies, cookie)
|
||||
}
|
||||
}
|
||||
return domainCookies, nil
|
||||
}
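// exampleCookiesForDomain is an illustrative sketch (not part of the patch;
// the cookies are made up). Only cookies whose Domain is a suffix of the
// request host are kept.
func exampleCookiesForDomain() {
	u, _ := url.Parse("https://www.icloud.com/")
	cookies := []*http.Cookie{
		{Name: "session", Domain: "icloud.com"}, // kept
		{Name: "other", Domain: "example.com"},  // dropped
	}
	kept, _ := GetCookiesForDomain(u, cookies)
	_ = kept
}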
|
||||
|
||||
// NewSession creates a new Session instance with default values.
|
||||
func NewSession() *Session {
|
||||
session := &Session{}
|
||||
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
|
||||
//session.ClientID = "auth-" + uuid.New().String()
|
||||
return session
|
||||
}
|
||||
|
||||
// AccountInfo represents account info
|
||||
type AccountInfo struct {
|
||||
DsInfo *ValidateDataDsInfo `json:"dsInfo"`
|
||||
HasMinimumDeviceForPhotosWeb bool `json:"hasMinimumDeviceForPhotosWeb"`
|
||||
ICDPEnabled bool `json:"iCDPEnabled"`
|
||||
Webservices map[string]*webService `json:"webservices"`
|
||||
PcsEnabled bool `json:"pcsEnabled"`
|
||||
TermsUpdateNeeded bool `json:"termsUpdateNeeded"`
|
||||
ConfigBag struct {
|
||||
Urls struct {
|
||||
AccountCreateUI string `json:"accountCreateUI"`
|
||||
AccountLoginUI string `json:"accountLoginUI"`
|
||||
AccountLogin string `json:"accountLogin"`
|
||||
AccountRepairUI string `json:"accountRepairUI"`
|
||||
DownloadICloudTerms string `json:"downloadICloudTerms"`
|
||||
RepairDone string `json:"repairDone"`
|
||||
AccountAuthorizeUI string `json:"accountAuthorizeUI"`
|
||||
VettingURLForEmail string `json:"vettingUrlForEmail"`
|
||||
AccountCreate string `json:"accountCreate"`
|
||||
GetICloudTerms string `json:"getICloudTerms"`
|
||||
VettingURLForPhone string `json:"vettingUrlForPhone"`
|
||||
} `json:"urls"`
|
||||
AccountCreateEnabled bool `json:"accountCreateEnabled"`
|
||||
} `json:"configBag"`
|
||||
HsaTrustedBrowser bool `json:"hsaTrustedBrowser"`
|
||||
AppsOrder []string `json:"appsOrder"`
|
||||
Version int `json:"version"`
|
||||
IsExtendedLogin bool `json:"isExtendedLogin"`
|
||||
PcsServiceIdentitiesIncluded bool `json:"pcsServiceIdentitiesIncluded"`
|
||||
IsRepairNeeded bool `json:"isRepairNeeded"`
|
||||
HsaChallengeRequired bool `json:"hsaChallengeRequired"`
|
||||
RequestInfo struct {
|
||||
Country string `json:"country"`
|
||||
TimeZone string `json:"timeZone"`
|
||||
Region string `json:"region"`
|
||||
} `json:"requestInfo"`
|
||||
PcsDeleted bool `json:"pcsDeleted"`
|
||||
ICloudInfo struct {
|
||||
SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
|
||||
} `json:"iCloudInfo"`
|
||||
Apps map[string]*ValidateDataApp `json:"apps"`
|
||||
}
|
||||
|
||||
// ValidateDataDsInfo represents validation info
|
||||
type ValidateDataDsInfo struct {
|
||||
HsaVersion int `json:"hsaVersion"`
|
||||
LastName string `json:"lastName"`
|
||||
ICDPEnabled bool `json:"iCDPEnabled"`
|
||||
TantorMigrated bool `json:"tantorMigrated"`
|
||||
Dsid string `json:"dsid"`
|
||||
HsaEnabled bool `json:"hsaEnabled"`
|
||||
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
|
||||
IroncadeMigrated bool `json:"ironcadeMigrated"`
|
||||
Locale string `json:"locale"`
|
||||
BrZoneConsolidated bool `json:"brZoneConsolidated"`
|
||||
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
|
||||
IsManagedAppleID bool `json:"isManagedAppleID"`
|
||||
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
|
||||
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
|
||||
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
|
||||
Gilligvited bool `json:"gilligvited"`
|
||||
AppleIDAliases []interface{} `json:"appleIdAliases"`
|
||||
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
|
||||
IsPaidDeveloper bool `json:"isPaidDeveloper"`
|
||||
CountryCode string `json:"countryCode"`
|
||||
NotificationID string `json:"notificationId"`
|
||||
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
|
||||
ADsID string `json:"aDsID"`
|
||||
Locked bool `json:"locked"`
|
||||
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
|
||||
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
|
||||
PrimaryEmail string `json:"primaryEmail"`
|
||||
AppleIDEntries []struct {
|
||||
IsPrimary bool `json:"isPrimary"`
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
} `json:"appleIdEntries"`
|
||||
GilliganEnabled bool `json:"gilligan-enabled"`
|
||||
IsWebAccessAllowed bool `json:"isWebAccessAllowed"`
|
||||
FullName string `json:"fullName"`
|
||||
MailFlags struct {
|
||||
IsThreadingAvailable bool `json:"isThreadingAvailable"`
|
||||
IsSearchV2Provisioned bool `json:"isSearchV2Provisioned"`
|
||||
SCKMail bool `json:"sCKMail"`
|
||||
IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
|
||||
} `json:"mailFlags"`
|
||||
LanguageCode string `json:"languageCode"`
|
||||
AppleID string `json:"appleId"`
|
||||
HasUnreleasedOS bool `json:"hasUnreleasedOS"`
|
||||
AnalyticsOptInStatus bool `json:"analyticsOptInStatus"`
|
||||
FirstName string `json:"firstName"`
|
||||
ICloudAppleIDAlias string `json:"iCloudAppleIdAlias"`
|
||||
NotesMigrated bool `json:"notesMigrated"`
|
||||
BeneficiaryInfo struct {
|
||||
IsBeneficiary bool `json:"isBeneficiary"`
|
||||
} `json:"beneficiaryInfo"`
|
||||
HasPaymentInfo bool `json:"hasPaymentInfo"`
|
||||
PcsDelet bool `json:"pcsDelet"`
|
||||
AppleIDAlias string `json:"appleIdAlias"`
|
||||
BrMigrated bool `json:"brMigrated"`
|
||||
StatusCode int `json:"statusCode"`
|
||||
FamilyEligible bool `json:"familyEligible"`
|
||||
}
|
||||
|
||||
// ValidateDataApp represents an app
type ValidateDataApp struct {
	CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
	IsQualifiedForBeta bool `json:"isQualifiedForBeta"`
}

// WebService represents a web service
type webService struct {
	PcsRequired bool `json:"pcsRequired"`
	URL string `json:"url"`
	UploadURL string `json:"uploadUrl"`
	Status string `json:"status"`
}

1174	backend/iclouddrive/iclouddrive.go	(new file; diff suppressed because it is too large)

18	backend/iclouddrive/iclouddrive_test.go	(new file)
@@ -0,0 +1,18 @@
//go:build !plan9 && !solaris

package iclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/iclouddrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestICloudDrive:",
		NilObject:  (*iclouddrive.Object)(nil),
	})
}

7	backend/iclouddrive/iclouddrive_unsupported.go	(new file)
@@ -0,0 +1,7 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris

// Package iclouddrive implements the iCloud Drive backend
package iclouddrive

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
	DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

// JobIDResponse respresents response struct with JobID for folder operations
// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
	JobID string `json:"jobId"`
}

@@ -151,6 +151,19 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
			Help:     "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
			Default:  "https://archive.org",
			Advanced: true,
		}, {
			Name: "item_metadata",
			Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
			Default:  []string{},
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name: "item_derive",
			Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
			Default: true,
		}, {
			Name: "disable_checksum",
			Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
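
// Illustrative usage (not part of the diff; the flag names follow rclone's usual
// --backendname-optionname convention and should be double-checked against the
// generated docs):
//
//	rclone copy big.mp4 ia:my-item \
//	    --internetarchive-item-derive=false \
//	    --internetarchive-item-metadata "collection=opensource" \
//	    --internetarchive-item-metadata "subject=video"
//
// i.e. skip IA's derive step for this upload and set two item-level metadata
// values at the same time.
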
@@ -201,6 +214,8 @@ type Options struct {
	Endpoint        string               `config:"endpoint"`
	FrontEndpoint   string               `config:"front_endpoint"`
	DisableChecksum bool                 `config:"disable_checksum"`
	ItemMetadata    []string             `config:"item_metadata"`
	ItemDerive      bool                 `config:"item_derive"`
	WaitArchive     fs.Duration          `config:"wait_archive"`
	Enc             encoder.MultiEncoder `config:"encoding"`
}
@@ -790,17 +805,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		"x-amz-filemeta-rclone-update-track": updateTracker,

		// we add some more headers for intuitive actions
		"x-amz-auto-make-bucket":     "1", // create an item if does not exist, do nothing if already
		"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
		"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
		"x-archive-meta-mediatype":   "data", // mark media type of the uploading file as "data"
		"x-archive-queue-derive":     "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
		"x-archive-cascade-delete":   "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
		"x-amz-auto-make-bucket":     "1", // create an item if does not exist, do nothing if already
		"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
		"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
		"x-archive-cascade-delete":   "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
	}

	if size >= 0 {
		headers["Content-Length"] = fmt.Sprintf("%d", size)
		headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
	}

	// This is IA's ITEM metadata, not file metadata
	headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
	if err != nil {
		return err
	}

	var mdata fs.Metadata
	mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
	if err == nil && mdata != nil {
@@ -863,6 +884,51 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return err
}

func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
	metadataCounter := make(map[string]int)
	metadataValues := make(map[string][]string)

	// First pass: count occurrences and collect values
	for _, v := range options.ItemMetadata {
		parts := strings.SplitN(v, "=", 2)
		if len(parts) != 2 {
			return newHeaders, errors.New("item metadata key=value should be in the form key=value")
		}
		key, value := parts[0], parts[1]
		metadataCounter[key]++
		metadataValues[key] = append(metadataValues[key], value)
	}

	// Second pass: add headers with appropriate prefixes
	for key, count := range metadataCounter {
		if count == 1 {
			// Only one occurrence, use x-archive-meta-
			headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
		} else {
			// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
			for i, value := range metadataValues[key] {
				headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
			}
		}
	}

	if o.fs.opt.ItemDerive {
		headers["x-archive-queue-derive"] = "1"
	} else {
		headers["x-archive-queue-derive"] = "0"
	}

	fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)

	for k, v := range headers {
		if strings.HasPrefix(k, "x-archive-meta") {
			fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
		}
	}

	return headers, nil
}

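// Illustrative sketch (not part of the diff): how the prefixing above plays out
// for a hypothetical set of options.
//
//	opt := Options{ItemMetadata: []string{"collection=test", "subject=a", "subject=b"}}
//	headers, _ := o.appendItemMetadataHeaders(map[string]string{}, opt)
//	// headers now contains roughly:
//	//   x-archive-meta-collection: test   (a key that occurs once keeps the plain prefix)
//	//   x-archive-meta01-subject:  a      (repeated keys are numbered from 01)
//	//   x-archive-meta02-subject:  b
//	//   x-archive-queue-derive:    1      (taken from o.fs.opt.ItemDerive, assumed true here)
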
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()

@@ -277,11 +277,9 @@ machines.`)
|
||||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -292,11 +290,9 @@ machines.`)
|
||||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
ClientID: telianoCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -307,11 +303,9 @@ machines.`)
|
||||
m.Set(configClientID, tele2CloudClientID)
|
||||
m.Set(configTokenURL, tele2CloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
ClientID: tele2CloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -322,11 +316,9 @@ machines.`)
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -924,19 +916,17 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
}
|
||||
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
oauthConfig := &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: defaultTokenURL,
|
||||
TokenURL: defaultTokenURL,
|
||||
},
|
||||
oauthConfig := &oauthutil.Config{
|
||||
AuthURL: defaultTokenURL,
|
||||
TokenURL: defaultTokenURL,
|
||||
}
|
||||
if ver == configVersion {
|
||||
oauthConfig.ClientID = defaultClientID
|
||||
// if custom endpoints are set use them else stick with defaults
|
||||
if tokenURL, ok := m.Get(configTokenURL); ok {
|
||||
oauthConfig.Endpoint.TokenURL = tokenURL
|
||||
oauthConfig.TokenURL = tokenURL
|
||||
// jottacloud is weird. we need to use the tokenURL as authURL
|
||||
oauthConfig.Endpoint.AuthURL = tokenURL
|
||||
oauthConfig.AuthURL = tokenURL
|
||||
}
|
||||
} else if ver == legacyConfigVersion {
|
||||
clientID, ok := m.Get(configClientID)
|
||||
@@ -950,8 +940,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
oauthConfig.ClientID = clientID
|
||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||
|
||||
oauthConfig.Endpoint.TokenURL = legacyTokenURL
|
||||
oauthConfig.Endpoint.AuthURL = legacyTokenURL
|
||||
oauthConfig.TokenURL = legacyTokenURL
|
||||
oauthConfig.AuthURL = legacyTokenURL
|
||||
|
||||
// add the request filter to fix token refresh
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
|
||||
@@ -5,18 +5,18 @@ package local
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
|
||||
var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
var available, total, free int64
|
||||
root, e := syscall.UTF16PtrFromString(f.root)
|
||||
root, e := windows.UTF16PtrFromString(f.root)
|
||||
if e != nil {
|
||||
return nil, fmt.Errorf("failed to read disk usage: %w", e)
|
||||
}
|
||||
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
|
||||
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
|
||||
)
|
||||
if e1 != syscall.Errno(0) {
|
||||
if e1 != windows.Errno(0) {
|
||||
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
|
||||
@@ -6,6 +6,7 @@ package local
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/go-darwin/apfs"
|
||||
@@ -22,7 +23,7 @@ import (
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
|
||||
if runtime.GOOS != "darwin" || f.opt.NoClone {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcObj, ok := src.(*Object)
|
||||
@@ -30,6 +31,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
fs.Debugf(src, "Can't clone - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
|
||||
@@ -44,11 +48,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = Clone(srcObj.path, f.localPath(remote))
|
||||
srcPath := srcObj.path
|
||||
if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
|
||||
srcPath, err = filepath.EvalSymlinks(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = Clone(srcPath, f.localPath(remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(remote, "server-side cloned!")
|
||||
|
||||
// Set metadata if --metadata is in use
|
||||
if meta != nil {
|
||||
|
||||
16	backend/local/lchmod.go	(new file)
@@ -0,0 +1,16 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// Can't do this safely on this OS - chmoding a symlink always
	// changes the destination.
	return nil
}

41	backend/local/lchmod_unix.go	(new file)
@@ -0,0 +1,41 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
	// and returns ENOTSUP if you try, so we don't support this on linux
	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lChmod", Path: name, Err: e}
	}
	return nil
}
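
// Illustrative sketch (not part of the diff): syscallMode maps Go's portable
// mode bits onto the raw syscall bits, for example
//
//	syscallMode(0o755)                 == 0o755
//	syscallMode(os.ModeSetuid | 0o755) == syscall.S_ISUID | 0o755
//
// lChmod then applies those bits to the link itself via fchmodat(2) with
// AT_SYMLINK_NOFOLLOW, which is why the file above excludes linux, where that
// combination returns ENOTSUP.
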
@@ -1,4 +1,4 @@
|
||||
//go:build windows || plan9 || js
|
||||
//go:build plan9 || js
|
||||
|
||||
package local
|
||||
|
||||
|
||||
19	backend/local/lchtimes_windows.go	(new file)
@@ -0,0 +1,19 @@
//go:build windows

package local

import (
	"time"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	return setTimes(name, atime, mtime, time.Time{}, true)
}

@@ -34,7 +34,6 @@ import (
|
||||
// Constants
|
||||
const (
|
||||
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
)
|
||||
|
||||
@@ -101,10 +100,8 @@ Metadata is supported on files and directories.
|
||||
},
|
||||
{
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
@@ -379,17 +376,22 @@ type Directory struct {
|
||||
|
||||
var (
|
||||
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
)
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Override --local-links with --links if set
|
||||
if ci.Links {
|
||||
opt.TranslateSymlinks = true
|
||||
}
|
||||
if opt.TranslateSymlinks && opt.FollowSymlinks {
|
||||
return nil, errLinksAndCopyLinks
|
||||
}
|
||||
@@ -435,9 +437,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
// Check to see if this is a .rclonelink if not found
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
|
||||
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
|
||||
}
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
@@ -508,8 +510,8 @@ func (f *Fs) caseInsensitive() bool {
|
||||
//
|
||||
// for regular files, localPath is returned unchanged
|
||||
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
|
||||
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
|
||||
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
|
||||
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
|
||||
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
|
||||
return newLocalPath, isTranslatedLink
|
||||
}
|
||||
|
||||
@@ -692,7 +694,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
} else {
|
||||
// Check whether this link should be translated
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
newRemote += fs.LinkSuffix
|
||||
}
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
|
||||
@@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
r.WriteFile(filePath, "content updated", time.Now())
|
||||
_, err = in.Read(buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
// Test corrupted on transfer
|
||||
@@ -111,7 +110,7 @@ func TestSymlink(t *testing.T) {
|
||||
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
|
||||
|
||||
// Object viewed as symlink
|
||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
|
||||
|
||||
// Object viewed as destination
|
||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||
@@ -140,7 +139,7 @@ func TestSymlink(t *testing.T) {
|
||||
|
||||
// Create a symlink
|
||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
r.CheckLocalItems(t, file1, file2, file3)
|
||||
@@ -156,9 +155,9 @@ func TestSymlink(t *testing.T) {
|
||||
assert.Equal(t, "file.txt", linkText)
|
||||
|
||||
// Check that NewObject gets the correct object
|
||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
|
||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
|
||||
// Check that NewObject doesn't see the non suffixed version
|
||||
@@ -166,7 +165,7 @@ func TestSymlink(t *testing.T) {
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check that NewFs works with the suffixed version and --links
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
@@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
|
||||
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
|
||||
|
||||
// Reupload it with different contents but same size and timestamp
|
||||
var b = bytes.NewBufferString("CONTENT")
|
||||
b := bytes.NewBufferString("CONTENT")
|
||||
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
|
||||
err = o.Update(ctx, b, src)
|
||||
require.NoError(t, err)
|
||||
@@ -269,22 +268,66 @@ func TestMetadata(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
const filePath = "metafile.txt"
|
||||
when := time.Now()
|
||||
const dayLength = len("2001-01-01")
|
||||
whenRFC := when.Format(time.RFC3339Nano)
|
||||
r.WriteFile(filePath, "metadata file contents", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Set fs into "-l" / "--links" mode
|
||||
f.opt.TranslateSymlinks = true
|
||||
|
||||
// Write a symlink to the file
|
||||
symlinkPath := "metafile-link.txt"
|
||||
osSymlinkPath := filepath.Join(f.root, symlinkPath)
|
||||
symlinkPath += fs.LinkSuffix
|
||||
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
|
||||
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
|
||||
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
|
||||
|
||||
// Get the object
|
||||
obj, err := f.NewObject(ctx, filePath)
|
||||
require.NoError(t, err)
|
||||
o := obj.(*Object)
|
||||
|
||||
// Get the symlink object
|
||||
symlinkObj, err := f.NewObject(ctx, symlinkPath)
|
||||
require.NoError(t, err)
|
||||
symlinkO := symlinkObj.(*Object)
|
||||
|
||||
// Record metadata for o
|
||||
oMeta, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test symlink first to check it doesn't mess up file
|
||||
t.Run("Symlink", func(t *testing.T) {
|
||||
testMetadata(t, r, symlinkO, symlinkModTime)
|
||||
})
|
||||
|
||||
// Read it again
|
||||
oMetaNew, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that operating on the symlink didn't change the file it was pointing to
|
||||
// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
|
||||
|
||||
// Now run the same tests on the file
|
||||
t.Run("File", func(t *testing.T) {
|
||||
testMetadata(t, r, o, when)
|
||||
})
|
||||
}
|
||||
|
||||
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
|
||||
ctx := context.Background()
|
||||
whenRFC := when.Format(time.RFC3339Nano)
|
||||
const dayLength = len("2001-01-01")
|
||||
|
||||
f := r.Flocal.(*Fs)
|
||||
features := f.Features()
|
||||
|
||||
var hasXID, hasAtime, hasBtime bool
|
||||
var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "freebsd", "netbsd", "linux":
|
||||
hasXID, hasAtime, hasBtime = true, true, true
|
||||
canSetXattrOnLinks = runtime.GOOS != "linux"
|
||||
case "openbsd", "solaris":
|
||||
hasXID, hasAtime = true, true
|
||||
case "windows":
|
||||
@@ -307,6 +350,10 @@ func TestMetadata(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, m)
|
||||
|
||||
if !canSetXattrOnLinks && o.translatedLink {
|
||||
t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
|
||||
}
|
||||
|
||||
inM := fs.Metadata{
|
||||
"potato": "chips",
|
||||
"cabbage": "soup",
|
||||
@@ -321,18 +368,21 @@ func TestMetadata(t *testing.T) {
|
||||
})
|
||||
|
||||
checkTime := func(m fs.Metadata, key string, when time.Time) {
|
||||
t.Helper()
|
||||
mt, ok := o.parseMetadataTime(m, key)
|
||||
assert.True(t, ok)
|
||||
dt := mt.Sub(when)
|
||||
precision := time.Second
|
||||
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
|
||||
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
|
||||
}
|
||||
|
||||
checkInt := func(m fs.Metadata, key string, base int) int {
|
||||
t.Helper()
|
||||
value, ok := o.parseMetadataInt(m, key, base)
|
||||
assert.True(t, ok)
|
||||
return value
|
||||
}
|
||||
|
||||
t.Run("Read", func(t *testing.T) {
|
||||
m, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -342,13 +392,12 @@ func TestMetadata(t *testing.T) {
|
||||
checkInt(m, "mode", 8)
|
||||
checkTime(m, "mtime", when)
|
||||
|
||||
assert.Equal(t, len(whenRFC), len(m["mtime"]))
|
||||
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
|
||||
|
||||
if hasAtime {
|
||||
if hasAtime && !o.translatedLink { // symlinks generally don't record atime
|
||||
checkTime(m, "atime", when)
|
||||
}
|
||||
if hasBtime {
|
||||
if hasBtime && !o.translatedLink { // symlinks generally don't record btime
|
||||
checkTime(m, "btime", when)
|
||||
}
|
||||
if hasXID {
|
||||
@@ -372,6 +421,10 @@ func TestMetadata(t *testing.T) {
|
||||
"mode": "0767",
|
||||
"potato": "wedges",
|
||||
}
|
||||
if !canSetXattrOnLinks && o.translatedLink {
|
||||
// Don't change xattr if not supported on symlinks
|
||||
delete(newM, "potato")
|
||||
}
|
||||
err := o.writeMetadata(newM)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -381,7 +434,11 @@ func TestMetadata(t *testing.T) {
|
||||
|
||||
mode := checkInt(m, "mode", 8)
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
|
||||
expectedMode := 0767
|
||||
if o.translatedLink && runtime.GOOS == "linux" {
|
||||
expectedMode = 0777 // perms of symlinks always read as 0777 on linux
|
||||
}
|
||||
assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
|
||||
}
|
||||
|
||||
checkTime(m, "mtime", newMtime)
|
||||
@@ -391,11 +448,10 @@ func TestMetadata(t *testing.T) {
|
||||
if haveSetBTime {
|
||||
checkTime(m, "btime", newBtime)
|
||||
}
|
||||
if xattrSupported {
|
||||
if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
|
||||
assert.Equal(t, "wedges", m["potato"])
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
@@ -572,4 +628,35 @@ func TestCopySymlink(t *testing.T) {
|
||||
linkContents, err := os.Readlink(dstPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", linkContents)
|
||||
|
||||
// Set fs into "-L/--copy-links" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
|
||||
// Create dst
|
||||
require.NoError(t, f.Mkdir(ctx, "dst2"))
|
||||
|
||||
// Do copy from src into dst
|
||||
src, err = f.NewObject(ctx, "src/link.txt")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, src)
|
||||
dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, dst)
|
||||
|
||||
// Test that we made a NON-symlink and it has the right contents
|
||||
dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
|
||||
fi, err := os.Lstat(dstPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, fi.Mode()&os.ModeSymlink == 0)
|
||||
want := fstest.NewItem("dst2/link.txt", "hello world", when)
|
||||
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
|
||||
|
||||
// Test that copying a normal file also works
|
||||
dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, dst)
|
||||
want = fstest.NewItem("dst2/file.txt", "hello world", when)
|
||||
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
|
||||
}
|
||||
|
||||
@@ -105,7 +105,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
||||
}
|
||||
if haveSetBTime {
|
||||
if btimeOK {
|
||||
err = setBTime(o.path, btime)
|
||||
if o.translatedLink {
|
||||
err = lsetBTime(o.path, btime)
|
||||
} else {
|
||||
err = setBTime(o.path, btime)
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
|
||||
}
|
||||
@@ -121,7 +125,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
||||
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
|
||||
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
|
||||
} else {
|
||||
err = os.Chown(o.path, uid, gid)
|
||||
if o.translatedLink {
|
||||
err = os.Lchown(o.path, uid, gid)
|
||||
} else {
|
||||
err = os.Chown(o.path, uid, gid)
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change ownership: %w", err)
|
||||
}
|
||||
@@ -132,7 +140,16 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
||||
if mode >= 0 {
|
||||
umode := uint(mode)
|
||||
if umode <= math.MaxUint32 {
|
||||
err = os.Chmod(o.path, os.FileMode(umode))
|
||||
if o.translatedLink {
|
||||
if haveLChmod {
|
||||
err = lChmod(o.path, os.FileMode(umode))
|
||||
} else {
|
||||
fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
|
||||
err = nil
|
||||
}
|
||||
} else {
|
||||
err = os.Chmod(o.path, os.FileMode(umode))
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change permissions: %w", err)
|
||||
}
|
||||
|
||||
@@ -13,3 +13,9 @@ func setBTime(name string, btime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
// lsetBTime changes the birth time of the link passed in
|
||||
func lsetBTime(name string, btime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,15 +9,20 @@ import (
|
||||
|
||||
const haveSetBTime = true
|
||||
|
||||
// setBTime sets the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
// setTimes sets any of atime, mtime or btime
|
||||
// if link is set it sets a link rather than the target
|
||||
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
|
||||
pathp, err := syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
|
||||
if link {
|
||||
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
|
||||
}
|
||||
h, err := syscall.CreateFile(pathp,
|
||||
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
|
||||
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
syscall.OPEN_EXISTING, fileFlag, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -27,6 +32,28 @@ func setBTime(name string, btime time.Time) (err error) {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
|
||||
return syscall.SetFileTime(h, &bFileTime, nil, nil)
|
||||
var patime, pmtime, pbtime *syscall.Filetime
|
||||
if !atime.IsZero() {
|
||||
t := syscall.NsecToFiletime(atime.UnixNano())
|
||||
patime = &t
|
||||
}
|
||||
if !mtime.IsZero() {
|
||||
t := syscall.NsecToFiletime(mtime.UnixNano())
|
||||
pmtime = &t
|
||||
}
|
||||
if !btime.IsZero() {
|
||||
t := syscall.NsecToFiletime(btime.UnixNano())
|
||||
pbtime = &t
|
||||
}
|
||||
return syscall.SetFileTime(h, pbtime, patime, pmtime)
|
||||
}
|
||||
|
||||
// setBTime sets the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
return setTimes(name, time.Time{}, time.Time{}, btime, false)
|
||||
}
|
||||
|
||||
// lsetBTime changes the birth time of the link passed in
|
||||
func lsetBTime(name string, btime time.Time) error {
|
||||
return setTimes(name, time.Time{}, time.Time{}, btime, true)
|
||||
}
|
||||
|
||||
@@ -68,14 +68,12 @@ var (
|
||||
)
|
||||
|
||||
// Description of how to authorize
|
||||
var oauthConfig = &oauth2.Config{
|
||||
var oauthConfig = &oauthutil.Config{
|
||||
ClientID: api.OAuthClientID,
|
||||
ClientSecret: "",
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: api.OAuthURL,
|
||||
TokenURL: api.OAuthURL,
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
AuthURL: api.OAuthURL,
|
||||
TokenURL: api.OAuthURL,
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
@@ -438,7 +436,9 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
|
||||
if err != nil || !tokenIsValid(t) {
|
||||
fs.Infof(f, "Valid token not found, authorizing.")
|
||||
ctx := oauthutil.Context(ctx, f.cli)
|
||||
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
|
||||
|
||||
oauth2Conf := oauthConfig.MakeOauth2Config()
|
||||
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
|
||||
}
|
||||
if err == nil && !tokenIsValid(t) {
|
||||
err = errors.New("invalid token")
|
||||
|
||||
@@ -202,9 +202,14 @@ type SharingLinkType struct {
|
||||
type LinkType string
|
||||
|
||||
const (
|
||||
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
|
||||
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
|
||||
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
|
||||
// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
|
||||
ViewLinkType LinkType = "view"
|
||||
// EditLinkType (role: write) An edit sharing link, allowing read-write access.
|
||||
EditLinkType LinkType = "edit"
|
||||
// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
|
||||
// content into a host webpage. Embed links are not available for OneDrive for
|
||||
// Business or SharePoint.
|
||||
EmbedLinkType LinkType = "embed"
|
||||
)
|
||||
|
||||
// LinkScope represents the scope of the link represented by this permission.
|
||||
@@ -212,9 +217,12 @@ const (
|
||||
type LinkScope string
|
||||
|
||||
const (
|
||||
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
|
||||
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
|
||||
|
||||
// AnonymousScope = Anyone with the link has access, without needing to sign in.
|
||||
// This may include people outside of your organization.
|
||||
AnonymousScope LinkScope = "anonymous"
|
||||
// OrganizationScope = Anyone signed into your organization (tenant) can use the
|
||||
// link to get access. Only available in OneDrive for Business and SharePoint.
|
||||
OrganizationScope LinkScope = "organization"
|
||||
)
|
||||
|
||||
// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
|
||||
@@ -236,10 +244,14 @@ type PermissionsType struct {
|
||||
type Role string
|
||||
|
||||
const (
|
||||
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
|
||||
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
|
||||
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
|
||||
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
|
||||
// ReadRole provides the ability to read the metadata and contents of the item.
|
||||
ReadRole Role = "read"
|
||||
// WriteRole provides the ability to read and modify the metadata and contents of the item.
|
||||
WriteRole Role = "write"
|
||||
// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
|
||||
OwnerRole Role = "owner"
|
||||
// MemberRole represents the member role for SharePoint and OneDrive for Business.
|
||||
MemberRole Role = "member"
|
||||
)
|
||||
|
||||
// PermissionsResponse is the response to the list permissions method
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -14,7 +15,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/errcount"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -40,7 +40,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -65,14 +64,21 @@ const (
|
||||
|
||||
// Globals
|
||||
var (
|
||||
authPath = "/common/oauth2/v2.0/authorize"
|
||||
tokenPath = "/common/oauth2/v2.0/token"
|
||||
|
||||
// Define the paths used for token operations
|
||||
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
|
||||
authPath = "/oauth2/v2.0/authorize"
|
||||
tokenPath = "/oauth2/v2.0/token"
|
||||
|
||||
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
|
||||
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
// When using client credential OAuth flow, scope of .default is required in order
|
||||
// to use the permissions configured for the application within the tenant
|
||||
scopeAccessClientCred = fs.SpaceSepList{".default"}
|
||||
|
||||
// Base config for how to auth
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: scopeAccess,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
@@ -125,7 +131,7 @@ func init() {
|
||||
Help: "Microsoft Cloud for US Government",
|
||||
}, {
|
||||
Value: regionDE,
|
||||
Help: "Microsoft Cloud Germany",
|
||||
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
|
||||
}, {
|
||||
Value: regionCN,
|
||||
Help: "Azure and Office 365 operated by Vnet Group in China",
|
||||
@@ -183,6 +189,14 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
|
||||
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||
|
||||
Set this if using
|
||||
- Client Credential flow
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
@@ -527,28 +541,54 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
|
||||
})
|
||||
}
|
||||
|
||||
// Config the backend
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
region, graphURL := getRegionURL(m)
|
||||
// Make the oauth config for the backend
|
||||
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
|
||||
// Copy the default oauthConfig
|
||||
oauthConfig := *oauthConfig
|
||||
|
||||
if config.State == "" {
|
||||
var accessScopes fs.SpaceSepList
|
||||
accessScopesString, _ := m.Get("access_scopes")
|
||||
err := accessScopes.Set(accessScopesString)
|
||||
// Set the scopes
|
||||
oauthConfig.Scopes = opt.AccessScopes
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
|
||||
// Construct the auth URLs
|
||||
prefix := commonPathPrefix
|
||||
if opt.Tenant != "" {
|
||||
prefix = "/" + opt.Tenant
|
||||
}
|
||||
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
|
||||
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
|
||||
|
||||
// Check to see if we are using client credentials flow
|
||||
if opt.ClientCredentials {
|
||||
// Override scope to .default
|
||||
oauthConfig.Scopes = scopeAccessClientCred
|
||||
if opt.Tenant == "" {
|
||||
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
|
||||
}
|
||||
}
|
||||
|
||||
return &oauthConfig, nil
|
||||
}
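
// Illustrative result (not part of the diff; the global endpoint value is an
// assumption and should be checked against authEndpoint):
//
//	tenant unset:     AuthURL  = https://login.microsoftonline.com/common/oauth2/v2.0/authorize
//	                  TokenURL = https://login.microsoftonline.com/common/oauth2/v2.0/token
//	tenant "contoso": AuthURL  = https://login.microsoftonline.com/contoso/oauth2/v2.0/authorize
//	                  TokenURL = https://login.microsoftonline.com/contoso/oauth2/v2.0/token
//
// With client_credentials set, the scopes collapse to ".default" and a tenant
// is required, as enforced above.
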
|
||||
|
||||
// Config the backend
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, graphURL := getRegionURL(m)
|
||||
|
||||
// Check to see if this is the start of the state machine execution
|
||||
if conf.State == "" {
|
||||
conf, err := makeOauthConfig(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
|
||||
}
|
||||
oauthConfig.Scopes = []string(accessScopes)
|
||||
disableSitePermission, _ := m.Get("disable_site_permission")
|
||||
if disableSitePermission == "true" {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[region] + authPath,
|
||||
TokenURL: authEndpoint[region] + tokenPath,
|
||||
return nil, err
|
||||
}
|
||||
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
OAuth2Config: conf,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -556,9 +596,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
|
||||
}
|
||||
|
||||
// Create a REST client, build on the OAuth client created above
|
||||
srv := rest.NewClient(oAuthClient)
|
||||
|
||||
switch config.State {
|
||||
switch conf.State {
|
||||
case "choose_type":
|
||||
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
|
||||
Value: "onedrive",
|
||||
@@ -584,7 +626,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
}})
|
||||
case "choose_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
return fs.ConfigGoto(config.Result)
|
||||
return fs.ConfigGoto(conf.Result)
|
||||
case "onedrive":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
opts: rest.Opts{
|
||||
@@ -602,16 +644,22 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
},
|
||||
})
|
||||
case "driveid":
|
||||
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
|
||||
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
// Default the drive_id to the previous version in the config
|
||||
out.Option.Default, _ = m.Get("drive_id")
|
||||
return out, nil
|
||||
case "driveid_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
finalDriveID: config.Result,
|
||||
finalDriveID: conf.Result,
|
||||
})
|
||||
case "siteid":
|
||||
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
|
||||
case "siteid_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
siteID: config.Result,
|
||||
siteID: conf.Result,
|
||||
})
|
||||
case "url":
|
||||
return fs.ConfigInput("url_end", "config_site_url", `Site URL
|
||||
@@ -622,7 +670,7 @@ Examples:
|
||||
- "https://XXX.sharepoint.com/teams/ID"
|
||||
`)
|
||||
case "url_end":
|
||||
siteURL := config.Result
|
||||
siteURL := conf.Result
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
|
||||
match := re.FindStringSubmatch(siteURL)
|
||||
if len(match) == 2 {
|
||||
@@ -637,12 +685,12 @@ Examples:
|
||||
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
|
||||
case "path_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
relativePath: config.Result,
|
||||
relativePath: conf.Result,
|
||||
})
|
||||
case "search":
|
||||
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
|
||||
case "search_end":
|
||||
searchTerm := config.Result
|
||||
searchTerm := conf.Result
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
@@ -664,10 +712,10 @@ Examples:
|
||||
})
|
||||
case "search_sites":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
siteID: config.Result,
|
||||
siteID: conf.Result,
|
||||
})
|
||||
case "driveid_final":
|
||||
finalDriveID := config.Result
|
||||
finalDriveID := conf.Result
|
||||
|
||||
// Test the driveID and get drive type
|
||||
opts := rest.Opts{
|
||||
@@ -686,12 +734,12 @@ Examples:
|
||||
|
||||
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
|
||||
case "driveid_final_end":
|
||||
if config.Result == "true" {
|
||||
if conf.Result == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
return fs.ConfigGoto("choose_type")
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
return nil, fmt.Errorf("unknown state %q", conf.State)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -702,7 +750,9 @@ type Options struct {
|
||||
DriveType string `config:"drive_type"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
DisableSitePermission bool `config:"disable_site_permission"`
|
||||
ClientCredentials bool `config:"client_credentials"`
|
||||
AccessScopes fs.SpaceSepList `config:"access_scopes"`
|
||||
Tenant string `config:"tenant"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
@@ -827,7 +877,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
retry = true
|
||||
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
|
||||
}
|
||||
case 429: // Too Many Requests.
|
||||
case 429, 503: // Too Many Requests, Server Too Busy
|
||||
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
|
||||
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
|
||||
retryAfter, parseErr := strconv.Atoi(values[0])
|
||||
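// Note (not part of the diff): Retry-After here carries a whole number of
// seconds, so a response such as
//
//	HTTP/1.1 429 Too Many Requests
//	Retry-After: 120
//
// asks the client to back off for two minutes before retrying; strconv.Atoi
// above parses that value so the retry delay can be based on it.
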
@@ -990,13 +1040,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||
oauthConfig.Scopes = opt.AccessScopes
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
|
||||
oauthConfig, err := makeOauthConfig(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
@@ -1545,9 +1592,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
if f.driveType == driveTypePersonal {
|
||||
return time.Millisecond
|
||||
}
|
||||
// While this is true for some OneDrive personal accounts, it
|
||||
// isn't true for all of them. See #8101 for details
|
||||
//
|
||||
// if f.driveType == driveTypePersonal {
|
||||
// return time.Millisecond
|
||||
// }
|
||||
return time.Second
|
||||
}
|
||||
|
||||
@@ -1606,7 +1656,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
@@ -1621,11 +1671,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
err := srcObj.readMetaData(ctx)
|
||||
err = srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find and remove existing object
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
// Check we aren't overwriting a file on the same remote
|
||||
if srcObj.fs == f {
|
||||
srcPath := srcObj.rootPath()
|
||||
@@ -2553,8 +2610,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return errors.New("can't upload content to a OneNote file")
|
||||
}
|
||||
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
// Only start the renewer if we have a valid one
|
||||
if o.fs.tokenRenewer != nil {
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
}
|
||||
|
||||
size := src.Size()
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -16,7 +17,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
|
||||
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
|
||||
compareDirMeta(expectedMeta, actualMeta, false)
|
||||
|
||||
// modtime
|
||||
assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
|
||||
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
|
||||
// try changing it and re-check it
|
||||
newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
|
||||
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
|
||||
// ensure that f.DirSetModTime also works
|
||||
err = f.DirSetModTime(ctx, "subdir", t3)
|
||||
assert.NoError(t, err)
|
||||
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
|
||||
assert.NoError(t, err)
|
||||
entries.ForDir(func(dir fs.Directory) {
|
||||
if dir.Remote() == "subdir" {
|
||||
assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
|
||||
fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
var uInfo usersInfoResponse
|
||||
var resp *http.Response
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/info.json/" + f.session.SessionID,
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(uInfo.StorageUsed),
|
||||
Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
|
||||
Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
@@ -1147,6 +1173,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.ParentIDer = (*Object)(nil)
|
||||
|
||||
@@ -231,3 +231,10 @@ type permissions struct {
|
||||
type uploadFileChunkReply struct {
|
||||
TotalWritten int64 `json:"TotalWritten"`
|
||||
}
|
||||
|
||||
// usersInfoResponse describes OpenDrive users/info.json response
|
||||
type usersInfoResponse struct {
|
||||
// This response contains many other values but these are the only ones currently in use
|
||||
StorageUsed int64 `json:"StorageUsed,string"`
|
||||
MaxStorage int64 `json:"MaxStorage,string"`
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
@@ -183,6 +184,9 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
|
||||
if ossPartNumber <= 8 {
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
}
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// retry all chunks once have done the first few
|
||||
return true, err
|
||||
}
|
||||
|
||||
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Required: false,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
|
||||
@@ -48,12 +48,10 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -61,8 +59,8 @@ var (
|
||||
)
|
||||
|
||||
// Update the TokenURL with the actual hostname
|
||||
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
|
||||
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
|
||||
func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
|
||||
oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
@@ -79,7 +77,7 @@ func init() {
|
||||
fs.Errorf(nil, "Failed to read config: %v", err)
|
||||
}
|
||||
updateTokenURL(oauthConfig, optc.Hostname)
|
||||
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
|
||||
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
|
||||
if auth == nil || auth.Form == nil {
|
||||
return errors.New("form not found in response")
|
||||
}
|
||||
@@ -399,14 +397,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
|
||||
return nil, fmt.Errorf("close file: %w", err)
|
||||
}
|
||||
|
||||
writer := &writerAt{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
fs: f,
|
||||
size: size,
|
||||
remote: remote,
|
||||
fd: openResult.FileDescriptor,
|
||||
fileID: openResult.Fileid,
|
||||
}
|
||||
|
||||
@@ -425,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
|
||||
})
|
||||
// Set our own http client in the context
|
||||
ctx = oauthutil.Context(ctx, baseClient)
|
||||
// create a new oauth client, re-use the token source
|
||||
// create a new oauth client, reuse the token source
|
||||
oAuthClient := oauth2.NewClient(ctx, f.ts)
|
||||
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
|
||||
}
|
||||
|
||||
@@ -18,21 +18,14 @@ import (
|
||||
// writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
|
||||
type writerAt struct {
|
||||
ctx context.Context
|
||||
client *rest.Client
|
||||
fs *Fs
|
||||
size int64
|
||||
remote string
|
||||
fd int64
|
||||
fileID int64
|
||||
}
|
||||
|
||||
// Close implements WriterAt.Close.
|
||||
func (c *writerAt) Close() error {
|
||||
// close fd
|
||||
if _, err := c.fileClose(c.ctx); err != nil {
|
||||
return fmt.Errorf("close fd: %w", err)
|
||||
}
|
||||
|
||||
// Avoiding race conditions: Depending on the tcp connection, there might be
|
||||
// caching issues when checking the size immediately after write.
|
||||
// Hence we try avoiding them by checking the resulting size on a different connection.
|
||||
@@ -72,8 +65,18 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
|
||||
inSHA1Bytes := sha1.Sum(buffer)
|
||||
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
|
||||
|
||||
client, err := c.fs.newSingleConnClient(c.ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("create client: %w", err)
|
||||
}
|
||||
|
||||
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
|
||||
// get target hash
|
||||
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
|
||||
outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -89,10 +92,15 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
|
||||
}
|
||||
|
||||
// upload buffer with offset if necessary
|
||||
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
|
||||
if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// close fd
|
||||
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
|
||||
return contentLength, fmt.Errorf("close fd: %w", err)
|
||||
}
|
||||
|
||||
return contentLength, nil
|
||||
}
|
||||
|
||||
@@ -125,11 +133,40 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
|
||||
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_open",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
|
||||
opts.Parameters.Set("flags", "0x0002") // O_WRITE
|
||||
|
||||
result := &api.FileOpenResponse{}
|
||||
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open new file descriptor: %w", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_checksum, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
|
||||
func (c *writerAt) fileChecksum(
|
||||
func fileChecksum(
|
||||
ctx context.Context,
|
||||
offset, count int64,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd, offset, count int64,
|
||||
) (*api.FileChecksumResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
@@ -140,26 +177,29 @@ func (c *writerAt) fileChecksum(
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
|
||||
|
||||
result := &api.FileChecksumResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
|
||||
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_pwrite, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
|
||||
func (c *writerAt) filePWrite(
|
||||
func filePWrite(
|
||||
ctx context.Context,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd int64,
|
||||
offset int64,
|
||||
buf []byte,
|
||||
) (*api.FilePWriteResponse, error) {
|
||||
@@ -176,24 +216,29 @@ func (c *writerAt) filePWrite(
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
|
||||
result := &api.FilePWriteResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
|
||||
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_close, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
|
||||
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
|
||||
func fileClose(
|
||||
ctx context.Context,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd int64,
|
||||
) (*api.FileCloseResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_close",
|
||||
@@ -201,11 +246,11 @@ func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
Close: true,
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
|
||||
result := &api.FileCloseResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -513,6 +513,72 @@ type RequestDecompress struct {
|
||||
DefaultParent bool `json:"default_parent,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------ authorization
|
||||
|
||||
// CaptchaToken is a response to requestCaptchaToken api call
|
||||
type CaptchaToken struct {
|
||||
CaptchaToken string `json:"captcha_token"`
|
||||
ExpiresIn int64 `json:"expires_in"` // currently 300s
|
||||
// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
|
||||
Expiry time.Time `json:"expiry,omitempty"`
|
||||
URL string `json:"url,omitempty"` // a link for users to solve captcha
|
||||
}
|
||||
|
||||
// expired reports whether the token is expired.
|
||||
// t must be non-nil.
|
||||
func (t *CaptchaToken) expired() bool {
|
||||
if t.Expiry.IsZero() {
|
||||
return false
|
||||
}
|
||||
|
||||
expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
|
||||
return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
|
||||
}
|
||||
|
||||
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
|
||||
func (t *CaptchaToken) Valid() bool {
|
||||
return t != nil && t.CaptchaToken != "" && !t.expired()
|
||||
}
|
||||
|
||||
// CaptchaTokenRequest is to request for captcha token
|
||||
type CaptchaTokenRequest struct {
|
||||
Action string `json:"action,omitempty"`
|
||||
CaptchaToken string `json:"captcha_token,omitempty"`
|
||||
ClientID string `json:"client_id,omitempty"`
|
||||
DeviceID string `json:"device_id,omitempty"`
|
||||
Meta *CaptchaTokenMeta `json:"meta,omitempty"`
|
||||
}
|
||||
|
||||
// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
|
||||
type CaptchaTokenMeta struct {
|
||||
CaptchaSign string `json:"captcha_sign,omitempty"`
|
||||
ClientVersion string `json:"client_version,omitempty"`
|
||||
PackageName string `json:"package_name,omitempty"`
|
||||
Timestamp string `json:"timestamp,omitempty"`
|
||||
UserID string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
|
||||
UserName string `json:"username,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
PhoneNumber string `json:"phone_number,omitempty"`
|
||||
}
|
||||
|
||||
// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
|
||||
type Token struct {
|
||||
TokenType string `json:"token_type"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
Sub string `json:"sub"`
|
||||
}
|
||||
|
||||
// Expiry returns expiry from expires in, so it should be called on retrieval
|
||||
// e must be non-nil.
|
||||
func (e *Token) Expiry() (t time.Time) {
|
||||
if v := e.ExpiresIn; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NOT implemented YET
|
||||
|
||||
@@ -3,8 +3,10 @@ package pikpak
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -14,10 +16,13 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
@@ -262,15 +267,20 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if src.Size() == 0 {
|
||||
// If src is zero-length, the API will return
|
||||
// Error "cid and file_size is required" (400)
|
||||
// In this case, we can simply return cid == gcid
|
||||
return cid, nil
|
||||
}
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("cid", cid)
|
||||
params.Set("file_size", strconv.FormatInt(src.Size(), 10))
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/resource/cid",
|
||||
Parameters: params,
|
||||
ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/resource/cid",
|
||||
Parameters: params,
|
||||
}
|
||||
|
||||
info := struct {
|
||||
@@ -368,11 +378,23 @@ func calcGcid(r io.Reader, size int64) (string, error) {
|
||||
return hex.EncodeToString(totalHash.Sum(nil)), nil
|
||||
}
|
||||
|
||||
// unWrapObjectInfo returns the underlying Object unwrapped as much as
|
||||
// possible or nil even if it is an OverrideRemote
|
||||
func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
|
||||
if o, ok := oi.(fs.Object); ok {
|
||||
return fs.UnWrapObject(o)
|
||||
} else if do, ok := oi.(*fs.OverrideRemote); ok {
|
||||
// Unwrap if it is an operations.OverrideRemote
|
||||
return do.UnWrap()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// calcCid calculates Cid from source
|
||||
//
|
||||
// Cid is a simplified version of Gcid
|
||||
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
|
||||
srcObj := fs.UnWrapObjectInfo(src)
|
||||
srcObj := unWrapObjectInfo(src)
|
||||
if srcObj == nil {
|
||||
return "", fmt.Errorf("failed to unwrap object from src: %s", src)
|
||||
}
|
||||
@@ -408,6 +430,8 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------ authorization
|
||||
|
||||
// randomly generates device id used for request header 'x-device-id'
|
||||
//
|
||||
// original javascript implementation
|
||||
@@ -428,3 +452,206 @@ func genDeviceID() string {
|
||||
}
|
||||
return string(base)
|
||||
}
|
||||
|
||||
var md5Salt = []string{
|
||||
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
|
||||
"+r6CQVxjzJV6LCV",
|
||||
"F",
|
||||
"pFJRC",
|
||||
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
|
||||
"/750aCr4lm/Sly/c",
|
||||
"RB+DT/gZCrbV",
|
||||
"",
|
||||
"CyLsf7hdkIRxRm215hl",
|
||||
"7xHvLi2tOYP0Y92b",
|
||||
"ZGTXXxu8E/MIWaEDB+Sm/",
|
||||
"1UI3",
|
||||
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
|
||||
"ihtqpG6FMt65+Xk+tWUH2",
|
||||
"NhXXU9rg4XXdzo7u5o",
|
||||
}
|
||||
|
||||
func md5Sum(text string) string {
|
||||
hash := md5.Sum([]byte(text))
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
func calcCaptchaSign(deviceID string) (timestamp, sign string) {
|
||||
timestamp = fmt.Sprint(time.Now().UnixMilli())
|
||||
str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
|
||||
for _, salt := range md5Salt {
|
||||
str = md5Sum(str + salt)
|
||||
}
|
||||
sign = "1." + str
|
||||
return
|
||||
}
|
||||
|
||||
func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
|
||||
req = &api.CaptchaTokenRequest{
|
||||
Action: action,
|
||||
CaptchaToken: oldToken, // can be empty initially
|
||||
ClientID: clientID,
|
||||
DeviceID: opt.DeviceID,
|
||||
Meta: new(api.CaptchaTokenMeta),
|
||||
}
|
||||
switch action {
|
||||
case "POST:/v1/auth/signin":
|
||||
req.Meta.UserName = opt.Username
|
||||
default:
|
||||
timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
|
||||
req.Meta.CaptchaSign = captchaSign
|
||||
req.Meta.Timestamp = timestamp
|
||||
req.Meta.ClientVersion = clientVersion
|
||||
req.Meta.PackageName = packageName
|
||||
req.Meta.UserID = opt.UserID
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CaptchaTokenSource stores updated captcha tokens in the config file
|
||||
type CaptchaTokenSource struct {
|
||||
mu sync.Mutex
|
||||
m configmap.Mapper
|
||||
opt *Options
|
||||
token *api.CaptchaToken
|
||||
ctx context.Context
|
||||
rst *pikpakClient
|
||||
}
|
||||
|
||||
// initialize CaptchaTokenSource from rclone.conf if possible
|
||||
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
|
||||
token := new(api.CaptchaToken)
|
||||
tokenString, ok := m.Get("captcha_token")
|
||||
if !ok || tokenString == "" {
|
||||
fs.Debugf(nil, "failed to read captcha token out of config file")
|
||||
} else {
|
||||
if err := json.Unmarshal([]byte(tokenString), token); err != nil {
|
||||
fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
|
||||
}
|
||||
}
|
||||
return &CaptchaTokenSource{
|
||||
m: m,
|
||||
opt: opt,
|
||||
token: token,
|
||||
ctx: ctx,
|
||||
rst: newPikpakClient(getClient(ctx, opt), opt),
|
||||
}
|
||||
}
|
||||
|
||||
// requestToken retrieves captcha token from API
|
||||
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
|
||||
}
|
||||
var info *api.CaptchaToken
|
||||
_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
if err == nil && info.ExpiresIn != 0 {
|
||||
// populate to Expiry
|
||||
info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
|
||||
cts.token = info // update with a new one
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
|
||||
oldToken := ""
|
||||
if cts.token != nil {
|
||||
oldToken = cts.token.CaptchaToken
|
||||
}
|
||||
action := "GET:/drive/v1/about"
|
||||
if opts.RootURL == "" && opts.Path != "" {
|
||||
action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
|
||||
} else if u, err := url.Parse(opts.RootURL); err == nil {
|
||||
action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
|
||||
}
|
||||
req := newCaptchaTokenRequest(action, oldToken, cts.opt)
|
||||
if err := cts.requestToken(cts.ctx, req); err != nil {
|
||||
return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
|
||||
}
|
||||
|
||||
// put it into rclone.conf
|
||||
tokenBytes, err := json.Marshal(cts.token)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to marshal captcha token: %w", err)
|
||||
}
|
||||
cts.m.Set("captcha_token", string(tokenBytes))
|
||||
return cts.token.CaptchaToken, nil
|
||||
}
|
||||
|
||||
// Invalidate resets existing captcha token for a forced refresh
|
||||
func (cts *CaptchaTokenSource) Invalidate() {
|
||||
cts.mu.Lock()
|
||||
cts.token.CaptchaToken = ""
|
||||
cts.mu.Unlock()
|
||||
}
|
||||
|
||||
// Token returns a valid captcha token
|
||||
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
|
||||
cts.mu.Lock()
|
||||
defer cts.mu.Unlock()
|
||||
if cts.token.Valid() {
|
||||
return cts.token.CaptchaToken, nil
|
||||
}
|
||||
return cts.refreshToken(opts)
|
||||
}
|
||||
|
||||
// pikpakClient wraps rest.Client with a handle of captcha token
|
||||
type pikpakClient struct {
|
||||
opt *Options
|
||||
client *rest.Client
|
||||
captcha *CaptchaTokenSource
|
||||
}
|
||||
|
||||
// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
|
||||
// * error handler
|
||||
// * root url
|
||||
// * default headers
|
||||
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
|
||||
client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
|
||||
for key, val := range map[string]string{
|
||||
"Referer": "https://mypikpak.com/",
|
||||
"x-client-id": clientID,
|
||||
"x-client-version": clientVersion,
|
||||
"x-device-id": opt.DeviceID,
|
||||
// "x-device-model": "firefox%2F129.0",
|
||||
// "x-device-name": "PC-Firefox",
|
||||
// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
|
||||
// "x-net-work-type": "NONE",
|
||||
// "x-os-version": "Win32",
|
||||
// "x-platform-version": "1",
|
||||
// "x-protocol-version": "301",
|
||||
// "x-provider-name": "NONE",
|
||||
// "x-sdk-version": "8.0.3",
|
||||
} {
|
||||
client.SetHeader(key, val)
|
||||
}
|
||||
return &pikpakClient{
|
||||
client: client,
|
||||
opt: opt,
|
||||
}
|
||||
}
|
||||
|
||||
// This should be called right after pikpakClient initialized
|
||||
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
|
||||
c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||
if c.captcha != nil {
|
||||
token, err := c.captcha.Token(opts)
|
||||
if err != nil || token == "" {
|
||||
return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
|
||||
}
|
||||
if opts.ExtraHeaders == nil {
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
}
|
||||
opts.ExtraHeaders["x-captcha-token"] = token
|
||||
}
|
||||
return c.client.CallJSON(ctx, opts, request, response)
|
||||
}
|
||||
|
||||
func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
|
||||
return c.client.Call(ctx, opts)
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ package pikpak
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -51,6 +52,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
@@ -64,64 +66,74 @@ import (
|
||||
|
||||
// Constants
|
||||
const (
|
||||
rcloneClientID = "YNxT9w7GMdWvEOKa"
|
||||
rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
taskWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURL = "https://api-drive.mypikpak.com"
|
||||
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = manager.DefaultUploadConcurrency
|
||||
clientID = "YUMx5nI8ZU8Ap8pm"
|
||||
clientVersion = "2.0.0"
|
||||
packageName = "mypikpak.com"
|
||||
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
taskWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURL = "https://api-drive.mypikpak.com"
|
||||
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = manager.DefaultUploadConcurrency
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
TokenURL: "https://user.mypikpak.com/v1/auth/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
TokenURL: "https://user.mypikpak.com/v1/auth/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
ClientID: clientID,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Returns OAuthOptions modified for pikpak
|
||||
func pikpakOAuthOptions() []fs.Option {
|
||||
opts := []fs.Option{}
|
||||
for _, opt := range oauthutil.SharedOptions {
|
||||
if opt.Name == config.ConfigClientID {
|
||||
opt.Advanced = true
|
||||
} else if opt.Name == config.ConfigClientSecret {
|
||||
opt.Advanced = true
|
||||
}
|
||||
opts = append(opts, opt)
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// pikpakAutorize retrieves OAuth token using user/pass and save it to rclone.conf
|
||||
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
|
||||
// override default client id/secret
|
||||
if id, ok := m.Get("client_id"); ok && id != "" {
|
||||
oauthConfig.ClientID = id
|
||||
}
|
||||
if secret, ok := m.Get("client_secret"); ok && secret != "" {
|
||||
oauthConfig.ClientSecret = secret
|
||||
if opt.Username == "" {
|
||||
return errors.New("no username")
|
||||
}
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
|
||||
}
|
||||
t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
|
||||
// new device id if necessary
|
||||
if len(opt.DeviceID) != 32 {
|
||||
opt.DeviceID = genDeviceID()
|
||||
m.Set("device_id", opt.DeviceID)
|
||||
fs.Infof(nil, "Using new device id %q", opt.DeviceID)
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
}
|
||||
req := map[string]string{
|
||||
"username": opt.Username,
|
||||
"password": pass,
|
||||
"client_id": clientID,
|
||||
}
|
||||
var token api.Token
|
||||
rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
|
||||
_, err = rst.CallJSON(ctx, &opts, req, &token)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
|
||||
rst.captcha.Invalidate()
|
||||
_, err = rst.CallJSON(ctx, &opts, req, &token)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve token using username/password: %w", err)
|
||||
}
|
||||
t := &oauth2.Token{
|
||||
AccessToken: token.AccessToken,
|
||||
TokenType: token.TokenType,
|
||||
RefreshToken: token.RefreshToken,
|
||||
Expiry: token.Expiry(),
|
||||
}
|
||||
return oauthutil.PutToken(name, m, t, false)
|
||||
}
|
||||
|
||||
@@ -160,7 +172,7 @@ func init() {
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
},
|
||||
Options: append(pikpakOAuthOptions(), []fs.Option{{
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "Pikpak username.",
|
||||
Required: true,
|
||||
@@ -170,6 +182,18 @@ func init() {
|
||||
Help: "Pikpak password.",
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "device_id",
|
||||
Help: "Device ID used for authorization.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user_agent",
|
||||
Default: defaultUserAgent,
|
||||
Advanced: true,
|
||||
Help: fmt.Sprintf(`HTTP user agent for pikpak.
|
||||
|
||||
Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: `ID of the root folder.
|
||||
@@ -189,6 +213,11 @@ Fill in for rclone to use a non root folder as its starting point.
|
||||
Default: false,
|
||||
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_media_link",
|
||||
Default: false,
|
||||
Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hash_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
|
||||
@@ -248,7 +277,7 @@ this may help to speed up the transfers.`,
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}}...),
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -256,9 +285,13 @@ this may help to speed up the transfers.`,
|
||||
type Options struct {
|
||||
Username string `config:"user"`
|
||||
Password string `config:"pass"`
|
||||
UserID string `config:"user_id"` // only available during runtime
|
||||
DeviceID string `config:"device_id"`
|
||||
UserAgent string `config:"user_agent"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
UseTrash bool `config:"use_trash"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
NoMediaLink bool `config:"no_media_link"`
|
||||
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
@@ -271,11 +304,10 @@ type Fs struct {
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
rst *rest.Client // the connection to the server
|
||||
rst *pikpakClient // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
rootFolderID string // the id of the root folder
|
||||
deviceID string // device id used for api requests
|
||||
client *http.Client // authorized client
|
||||
m configmap.Mapper
|
||||
tokenMu *sync.Mutex // when renewing tokens
|
||||
@@ -429,6 +461,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
||||
} else if apiErr.Reason == "file_space_not_enough" {
|
||||
// "file_space_not_enough" (8): Storage space is not enough
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
|
||||
// "captcha_invalid" (9): Verification code is invalid
|
||||
// This error occurred on the POST:/drive/v1/files endpoint
|
||||
// when a zero-byte file was uploaded with an invalid captcha token
|
||||
f.rst.captcha.Invalidate()
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -452,13 +490,36 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// getClient makes an http client according to the options
|
||||
func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
// Override few config settings and create a client
|
||||
newCtx, ci := fs.AddConfig(ctx)
|
||||
ci.UserAgent = opt.UserAgent
|
||||
return fshttp.NewClient(newCtx)
|
||||
}
|
||||
|
||||
// newClientWithPacer sets a new http/rest client with a pacer to Fs
|
||||
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
|
||||
f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
|
||||
var ts *oauthutil.TokenSource
|
||||
f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create oauth client: %w", err)
|
||||
}
|
||||
f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
|
||||
token, err := ts.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// parse user_id from oauth access token for later use
|
||||
if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
|
||||
jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
|
||||
info := struct {
|
||||
UserID string `json:"sub,omitempty"`
|
||||
}{}
|
||||
if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
|
||||
f.opt.UserID = info.UserID
|
||||
}
|
||||
}
|
||||
f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
return nil
|
||||
}
|
||||
@@ -491,9 +552,19 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
CanHaveEmptyDirectories: true, // can have empty directories
|
||||
NoMultiThreading: true, // can't have multiple threads downloading
|
||||
}).Fill(ctx, f)
|
||||
f.deviceID = genDeviceID()
|
||||
|
||||
// new device id if necessary
|
||||
if len(f.opt.DeviceID) != 32 {
|
||||
f.opt.DeviceID = genDeviceID()
|
||||
m.Set("device_id", f.opt.DeviceID)
|
||||
fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
|
||||
}
|
||||
|
||||
if err := f.newClientWithPacer(ctx); err != nil {
|
||||
// re-authorize if necessary
|
||||
if strings.Contains(err.Error(), "invalid_grant") {
|
||||
return f, f.reAuthorize(ctx)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1510,15 +1581,14 @@ func (o *Object) setMetaData(info *api.File) (err error) {
|
||||
o.md5sum = info.Md5Checksum
|
||||
if info.Links.ApplicationOctetStream != nil {
|
||||
o.link = info.Links.ApplicationOctetStream
|
||||
if fid := parseFileID(o.link.URL); fid != "" {
|
||||
for mid, media := range info.Medias {
|
||||
if media.Link == nil {
|
||||
continue
|
||||
}
|
||||
if mfid := parseFileID(media.Link.URL); fid == mfid {
|
||||
fs.Debugf(o, "Using a media link from Medias[%d]", mid)
|
||||
o.link = media.Link
|
||||
break
|
||||
if !o.fs.opt.NoMediaLink {
|
||||
if fid := parseFileID(o.link.URL); fid != "" {
|
||||
for _, media := range info.Medias {
|
||||
if media.Link != nil && parseFileID(media.Link.URL) == fid {
|
||||
fs.Debugf(o, "Using a media link")
|
||||
o.link = media.Link
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1707,7 +1777,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
|
||||
gcid, err := o.fs.getGcid(ctx, src)
|
||||
if err != nil || gcid == "" {
|
||||
fs.Debugf(o, "calculating gcid: %v", err)
|
||||
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
|
||||
if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
|
||||
// No buffering; directly calculate gcid from source
|
||||
rc, err := srcObj.Open(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -43,7 +43,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -59,12 +58,10 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://www.premiumize.me/authorize",
|
||||
TokenURL: "https://www.premiumize.me/token",
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://www.premiumize.me/authorize",
|
||||
TokenURL: "https://www.premiumize.me/token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
||||
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := tempF.newObjectWithLink(ctx, remote, nil)
|
||||
_, err := tempF.newObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
@@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
// ErrorIsDir if possible without doing any extra work,
|
||||
// otherwise ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithLink(ctx, remote, nil)
|
||||
return f.newObject(ctx, remote)
|
||||
}
|
||||
|
||||
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
|
||||
@@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
|
||||
return link, nil
|
||||
}
|
||||
|
||||
// readMetaDataForRemote reads the metadata from the remote
|
||||
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
|
||||
link, err := f.getObjectLink(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// readMetaDataForLink reads the metadata from the remote
|
||||
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
|
||||
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
|
||||
var err error
|
||||
if err = f.pacer.Call(func() (bool, error) {
|
||||
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
|
||||
return shouldRetry(ctx, err)
|
||||
}); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return link, fileSystemAttrs, nil
|
||||
return fileSystemAttrs, nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
// Return an Object from a path and link
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
|
||||
if o.link != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
|
||||
if err != nil {
|
||||
return err
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
o.id = link.LinkID
|
||||
@@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
|
||||
o.mimetype = link.MIMEType
|
||||
o.link = link
|
||||
|
||||
fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fileSystemAttrs != nil {
|
||||
o.modTime = fileSystemAttrs.ModificationTime
|
||||
o.originalSize = &fileSystemAttrs.Size
|
||||
@@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
|
||||
o.digests = &fileSystemAttrs.Digests
|
||||
}
|
||||
|
||||
return nil
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
// Return an Object from a path only
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
err := o.readMetaData(ctx, link)
|
||||
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
link, err := f.getObjectLink(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
return f.newObjectWithLink(ctx, remote, link)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
|
||||
@@ -572,6 +572,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We have successfully copied the file to random name
|
||||
// Check to see if file already exists first and delete it if so
|
||||
existingObj, err := f.NewObject(ctx, remote)
|
||||
if err == nil {
|
||||
err = existingObj.Remove(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
params := url.Values{}
|
||||
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
/*
|
||||
@@ -41,12 +40,10 @@ const (
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
putioConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
|
||||
TokenURL: "https://api.put.io/v2/oauth2/access_token",
|
||||
},
|
||||
putioConfig = &oauthutil.Config{
|
||||
Scopes: []string{},
|
||||
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
|
||||
TokenURL: "https://api.put.io/v2/oauth2/access_token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
||||
214
backend/s3/s3.go
214
backend/s3/s3.go
@@ -136,6 +136,9 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "Netease",
|
||||
Help: "Netease Object Storage (NOS)",
|
||||
}, {
|
||||
Value: "Outscale",
|
||||
Help: "OUTSCALE Object Storage (OOS)",
|
||||
}, {
|
||||
Value: "Petabox",
|
||||
Help: "Petabox Object Storage",
|
||||
@@ -151,6 +154,9 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "SeaweedFS",
|
||||
Help: "SeaweedFS S3",
|
||||
}, {
|
||||
Value: "Selectel",
|
||||
Help: "Selectel Object Storage",
|
||||
}, {
|
||||
Value: "StackPath",
|
||||
Help: "StackPath Object Storage",
|
||||
@@ -488,6 +494,26 @@ func init() {
|
||||
Value: "eu-south-2",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
Provider: "Outscale",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-west-2",
|
||||
Help: "Paris, France",
|
||||
}, {
|
||||
Value: "us-east-2",
|
||||
Help: "New Jersey, USA",
|
||||
}, {
|
||||
Value: "us-west-1",
|
||||
Help: "California, USA",
|
||||
}, {
|
||||
Value: "cloudgouv-eu-west-1",
|
||||
Help: "SecNumCloud, Paris, France",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Tokyo, Japan",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
@@ -528,10 +554,19 @@ func init() {
|
||||
Value: "tw-001",
|
||||
Help: "Asia (Taiwan)",
|
||||
}},
|
||||
}, {
|
||||
// See endpoints for object storage regions: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
|
||||
Name: "region",
|
||||
Help: "Region where your data stored.\n",
|
||||
Provider: "Selectel",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ru-1",
|
||||
Help: "St. Petersburg",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -1296,10 +1331,19 @@ func init() {
|
||||
Value: "s3-ap-northeast-1.qiniucs.com",
|
||||
Help: "Northeast Asia Endpoint 1",
|
||||
}},
|
||||
}, {
|
||||
// Selectel endpoints: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Selectel Object Storage.",
|
||||
Provider: "Selectel",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.ru-1.storage.selcloud.ru",
|
||||
Help: "Saint Petersburg",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1344,6 +1388,26 @@ func init() {
|
||||
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "oos.eu-west-2.outscale.com",
|
||||
Help: "Outscale EU West 2 (Paris)",
|
||||
Provider: "Outscale",
|
||||
}, {
|
||||
Value: "oos.us-east-2.outscale.com",
|
||||
Help: "Outscale US east 2 (New Jersey)",
|
||||
Provider: "Outscale",
|
||||
}, {
|
||||
Value: "oos.us-west-1.outscale.com",
|
||||
Help: "Outscale EU West 1 (California)",
|
||||
Provider: "Outscale",
|
||||
}, {
|
||||
Value: "oos.cloudgouv-eu-west-1.outscale.com",
|
||||
Help: "Outscale SecNumCloud (Paris)",
|
||||
Provider: "Outscale",
|
||||
}, {
|
||||
Value: "oos.ap-northeast-1.outscale.com",
|
||||
Help: "Outscale AP Northeast 1 (Japan)",
|
||||
Provider: "Outscale",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi US East 1 (N. Virginia)",
|
||||
@@ -1380,6 +1444,10 @@ func init() {
|
||||
Value: "s3.eu-west-2.wasabisys.com",
|
||||
Help: "Wasabi EU West 2 (Paris)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.eu-south-1.wasabisys.com",
|
||||
Help: "Wasabi EU South 1 (Milan)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-northeast-1.wasabisys.com",
|
||||
Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",
|
||||
@@ -1408,14 +1476,6 @@ func init() {
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
|
||||
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
|
||||
Provider: "ArvanCloud",
|
||||
}, {
|
||||
Value: "br-se1.magaluobjects.com",
|
||||
Help: "Magalu BR Southeast 1 endpoint",
|
||||
Provider: "Magalu",
|
||||
}, {
|
||||
Value: "br-ne1.magaluobjects.com",
|
||||
Help: "Magalu BR Northeast 1 endpoint",
|
||||
Provider: "Magalu",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
@@ -1798,7 +1858,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1813,7 +1873,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
`,
|
||||
Provider: "!Storj,Synology,Cloudflare",
|
||||
Provider: "!Storj,Selectel,Synology,Cloudflare",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
@@ -1988,7 +2048,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Glacier storage class",
|
||||
Help: "Glacier Flexible Retrieval storage class",
|
||||
}, {
|
||||
Value: "DEEP_ARCHIVE",
|
||||
Help: "Glacier Deep Archive storage class",
|
||||
@@ -2054,13 +2114,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Standard storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: #todo
|
||||
// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in Magalu.",
|
||||
Provider: "Magalu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER_IR",
|
||||
Help: "Glacier Instant Retrieval storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
|
||||
@@ -2606,6 +2669,35 @@ knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "directory_bucket",
|
||||
Help: strings.ReplaceAll(`Set to use AWS Directory Buckets
|
||||
|
||||
If you are using an AWS Directory Bucket then set this flag.
|
||||
|
||||
This will ensure no |Content-Md5| headers are sent and ensure |ETag|
|
||||
headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
|
||||
be set on all objects whether single or multipart uploaded.
|
||||
|
||||
This also sets |no_check_bucket = true|.
|
||||
|
||||
Note that Directory Buckets do not support:
|
||||
|
||||
- Versioning
|
||||
- |Content-Encoding: gzip|
|
||||
|
||||
Rclone limitations with Directory Buckets:
|
||||
|
||||
- rclone does not support creating Directory Buckets with |rclone mkdir|
|
||||
- ... or removing them with |rclone rmdir| yet
|
||||
- Directory Buckets do not appear when doing |rclone lsf| at the top level.
|
||||
- Rclone can't remove auto created directories yet. In theory this should
|
||||
work with |directory_markers = true| but it doesn't.
|
||||
- Directories don't seem to appear in recursive (ListR) listings.
|
||||
`, "|", "`"),
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Provider: "AWS",
|
||||
}, {
|
||||
Name: "sdk_log_mode",
|
||||
Help: strings.ReplaceAll(`Set to debug the SDK
|
||||
@@ -2780,6 +2872,7 @@ type Options struct {
|
||||
UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
|
||||
UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
|
||||
SDKLogMode sdkLogMode `config:"sdk_log_mode"`
|
||||
DirectoryBucket bool `config:"directory_bucket"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -3052,9 +3145,16 @@ func (s3logger) Logf(classification logging.Classification, format string, v ...
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
var awsConfig aws.Config
|
||||
// Make the default static auth
|
||||
v := aws.Credentials{
|
||||
AccessKeyID: opt.AccessKeyID,
|
||||
SecretAccessKey: opt.SecretAccessKey,
|
||||
SessionToken: opt.SessionToken,
|
||||
}
|
||||
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
|
||||
|
||||
// Try to fill in the config from the environment if env_auth=true
|
||||
if opt.EnvAuth {
|
||||
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
|
||||
configOpts := []func(*awsconfig.LoadOptions) error{}
|
||||
// Set the name of the profile if supplied
|
||||
if opt.Profile != "" {
|
||||
@@ -3079,13 +3179,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
case opt.SecretAccessKey == "":
|
||||
return nil, errors.New("secret_access_key not found")
|
||||
default:
|
||||
// Make the static auth
|
||||
v := aws.Credentials{
|
||||
AccessKeyID: opt.AccessKeyID,
|
||||
SecretAccessKey: opt.SecretAccessKey,
|
||||
SessionToken: opt.SessionToken,
|
||||
}
|
||||
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
|
||||
// static credentials are already set
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3245,7 +3339,7 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
|
||||
virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
|
||||
urlEncodeListings = true // URL encode the listings to help with control characters
|
||||
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
|
||||
useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
|
||||
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
|
||||
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
|
||||
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
|
||||
@@ -3328,6 +3422,8 @@ func setQuirks(opt *Options) {
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // untested
|
||||
case "Outscale":
|
||||
virtualHostStyle = false
|
||||
case "RackCorp":
|
||||
// No quirks
|
||||
useMultipartEtag = false // untested
|
||||
@@ -3350,6 +3446,8 @@ func setQuirks(opt *Options) {
|
||||
}
|
||||
urlEncodeListings = true
|
||||
useAlreadyExists = true
|
||||
case "Selectel":
|
||||
urlEncodeListings = false
|
||||
case "SeaweedFS":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
@@ -3367,6 +3465,10 @@ func setQuirks(opt *Options) {
|
||||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
useAlreadyExists = false // returns BucketAlreadyExists
|
||||
// Storj doesn't support multi-part server side copy:
|
||||
// https://github.com/storj/roadmap/issues/40
|
||||
// So make cutoff very large which it does support
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
case "Synology":
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false // untested
|
||||
@@ -3547,6 +3649,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// MD5 digest of their object data.
|
||||
f.etagIsNotMD5 = true
|
||||
}
|
||||
if opt.DirectoryBucket {
|
||||
// Objects uploaded to directory buckets appear to have random ETags
|
||||
//
|
||||
// This doesn't appear to be documented
|
||||
f.etagIsNotMD5 = true
|
||||
// The normal API doesn't work for creating directory buckets, so don't try
|
||||
f.opt.NoCheckBucket = true
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -3567,6 +3677,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
if opt.Provider == "AWS" {
|
||||
f.features.DoubleSlash = true
|
||||
}
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
@@ -4038,7 +4151,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if !opt.findFile {
|
||||
if opt.directory != "" {
|
||||
if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
|
||||
opt.directory += "/"
|
||||
}
|
||||
}
|
||||
@@ -4135,14 +4248,18 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, opt.prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
fs.Logf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
// Trim one slash off the remote name
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
if remote == "" || bucket.IsAllSlashes(remote) {
|
||||
remote += "/"
|
||||
}
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &types.Object{Key: &remote}, nil, true)
|
||||
if err != nil {
|
||||
if err == errEndList {
|
||||
@@ -5745,12 +5862,31 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
|
||||
ContentEncoding: header("Content-Encoding"),
|
||||
ContentLanguage: header("Content-Language"),
|
||||
ContentType: header("Content-Type"),
|
||||
StorageClass: types.StorageClass(*header("X-Amz-Storage-Class")),
|
||||
StorageClass: types.StorageClass(deref(header("X-Amz-Storage-Class"))),
|
||||
}
|
||||
o.setMetaData(&head)
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// middleware to stop the SDK adding `Accept-Encoding: identity`
|
||||
func removeDisableGzip() func(*middleware.Stack) error {
|
||||
return func(stack *middleware.Stack) error {
|
||||
_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// middleware to set Accept-Encoding to how we want it
|
||||
//
|
||||
// This makes sure we download compressed files as-is from all platforms
|
||||
func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
|
||||
APIOptions = append(APIOptions, removeDisableGzip())
|
||||
if f.opt.UseAcceptEncodingGzip.Value {
|
||||
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
|
||||
}
|
||||
return APIOptions
|
||||
}
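A hedged sketch of how a caller attaches these per-request middleware mutators to a single SDK call; `apiOptions` stands for the slice returned by `acceptEncoding()` above, and the bucket/key values are placeholders:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

// getWithEncoding fetches one object, applying the Accept-Encoding middleware
// for this call only via aws-sdk-go-v2's per-operation functional options.
func getWithEncoding(ctx context.Context, client *s3.Client, bucket, key string,
	apiOptions []func(*middleware.Stack) error) (*s3.GetObjectOutput, error) {
	return client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, func(o *s3.Options) {
		// APIOptions is the SDK's hook for editing the middleware stack per call.
		o.APIOptions = append(o.APIOptions, apiOptions...)
	})
}
```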
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
@@ -5784,11 +5920,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
var APIOptions []func(*middleware.Stack) error
|
||||
|
||||
// Override the automatic decompression in the transport to
|
||||
// download compressed files as-is
|
||||
if o.fs.opt.UseAcceptEncodingGzip.Value {
|
||||
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
|
||||
}
|
||||
// Set the SDK to always download compressed files as-is
|
||||
APIOptions = append(APIOptions, o.fs.acceptEncoding()...)
|
||||
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
@@ -5926,7 +6059,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
if mOut == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
|
||||
} else if mOut.UploadId == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
|
||||
}
|
||||
}
|
||||
return f.shouldRetry(ctx, err)
|
||||
@@ -5939,8 +6072,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
bucket: mOut.Bucket,
|
||||
key: mOut.Key,
|
||||
bucket: ui.req.Bucket,
|
||||
key: ui.req.Key,
|
||||
uploadID: mOut.UploadId,
|
||||
multiPartUploadInput: &mReq,
|
||||
completedParts: make([]types.CompletedPart, 0),
|
||||
@@ -6028,6 +6161,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
|
||||
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
|
||||
}
|
||||
if w.f.opt.DirectoryBucket {
|
||||
// Directory buckets do not support "Content-Md5" header
|
||||
uploadPartReq.ContentMD5 = nil
|
||||
}
|
||||
var uout *s3.UploadPartOutput
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// rewind the reader on retry and after reading md5
|
||||
@@ -6040,6 +6177,9 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
if chunkNumber <= 8 {
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
}
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// retry all chunks once have done the first few
|
||||
return true, err
|
||||
}
|
||||
@@ -6304,7 +6444,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
|
||||
// provided checksums aren't disabled
|
||||
ui.req.Metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
@@ -6319,7 +6459,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
if size >= 0 {
|
||||
ui.req.ContentLength = &size
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
|
||||
@@ -23,14 +23,20 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
opt := &fstests.Opt{
|
||||
RemoteName: "TestS3:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
TiersToTest: []string{"STANDARD"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
// Test wider range of tiers on AWS
|
||||
if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
|
||||
opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
|
||||
}
|
||||
fstests.Run(t, opt)
|
||||
|
||||
}
|
||||
|
||||
func TestIntegration2(t *testing.T) {
|
||||
|
||||
@@ -99,6 +99,11 @@ Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pubkey",
|
||||
Help: `SSH public certificate for public certificate based authentication.
|
||||
Set this if you have a signed certificate you want to use for authentication.
|
||||
If specified will override pubkey_file.`,
|
||||
}, {
|
||||
Name: "pubkey_file",
|
||||
Help: `Optional path to public key file.
|
||||
@@ -511,6 +516,7 @@ type Options struct {
|
||||
KeyPem string `config:"key_pem"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
PubKey string `config:"pubkey"`
|
||||
PubKeyFile string `config:"pubkey_file"`
|
||||
KnownHostsFile string `config:"known_hosts_file"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
@@ -997,13 +1003,21 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
// If a public key has been specified then use that
|
||||
if pubkeyFile != "" {
|
||||
certfile, err := os.ReadFile(pubkeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read cert file: %w", err)
|
||||
if pubkeyFile != "" || opt.PubKey != "" {
|
||||
pubKeyRaw := []byte(opt.PubKey)
|
||||
// Use this error if public key is provided inline and is not a certificate
|
||||
// if public key file is provided instead, use the err in the if block
|
||||
notACertError := errors.New("public key provided is not a certificate: " + opt.PubKey)
|
||||
if opt.PubKey == "" {
|
||||
notACertError = errors.New("public key file is not a certificate file: " + pubkeyFile)
|
||||
err := error(nil)
|
||||
pubKeyRaw, err = os.ReadFile(pubkeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read cert file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
|
||||
pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse cert file: %w", err)
|
||||
}
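A hedged, self-contained sketch of the certificate flow the hunk above implements: parse the authorized_keys style blob (supplied inline via the new `pubkey` option or read from `pubkey_file`), check that it really is a certificate, and wrap the existing private-key signer with it:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// certSigner wraps signer with the SSH certificate contained in pubKeyRaw.
func certSigner(pubKeyRaw []byte, signer ssh.Signer) (ssh.Signer, error) {
	pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
	if err != nil {
		return nil, fmt.Errorf("unable to parse cert: %w", err)
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, errors.New("public key provided is not a certificate")
	}
	// NewCertSigner presents the certificate while signing with the private key.
	return ssh.NewCertSigner(cert, signer)
}
```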
|
||||
@@ -1017,7 +1031,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// knows everything it needs.
|
||||
cert, ok := pk.(*ssh.Certificate)
|
||||
if !ok {
|
||||
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
|
||||
return nil, notACertError
|
||||
}
|
||||
pubsigner, err := ssh.NewCertSigner(cert, signer)
|
||||
if err != nil {
|
||||
@@ -2087,10 +2101,10 @@ func (file *objectReader) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Close a reader of a remote sftp file
|
||||
func (file *objectReader) Close() (err error) {
|
||||
// Close the sftpFile - this will likely cause the WriteTo to error
|
||||
err = file.sftpFile.Close()
|
||||
// Close the pipeReader so writes to the pipeWriter fail
|
||||
_ = file.pipeReader.Close()
|
||||
// Close the sftpFile - this will likely cause the WriteTo to error
|
||||
err = file.sftpFile.Close()
|
||||
// Wait for the background process to finish
|
||||
<-file.done
|
||||
// Show connection no longer in use
|
||||
|
||||
@@ -97,7 +97,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -115,13 +114,11 @@ const (
|
||||
)
|
||||
|
||||
// Generate a new oauth2 config which we will update when we know the TokenURL
|
||||
func newOauthConfig(tokenURL string) *oauth2.Config {
|
||||
return &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://secure.sharefile.com/oauth/authorize",
|
||||
TokenURL: tokenURL,
|
||||
},
|
||||
func newOauthConfig(tokenURL string) *oauthutil.Config {
|
||||
return &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://secure.sharefile.com/oauth/authorize",
|
||||
TokenURL: tokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectPublicSecureURL,
|
||||
@@ -136,7 +133,7 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
oauthConfig := newOauthConfig("")
|
||||
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
|
||||
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
|
||||
if auth == nil || auth.Form == nil {
|
||||
return errors.New("endpoint not found in response")
|
||||
}
|
||||
@@ -147,7 +144,7 @@ func init() {
|
||||
}
|
||||
endpoint := "https://" + subdomain + "." + apicp
|
||||
m.Set("endpoint", endpoint)
|
||||
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
|
||||
oauthConfig.TokenURL = endpoint + tokenPath
|
||||
return nil
|
||||
}
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
|
||||
@@ -31,13 +31,29 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
}
|
||||
}
|
||||
|
||||
d := &smb2.Dialer{
|
||||
Initiator: &smb2.NTLMInitiator{
|
||||
d := &smb2.Dialer{}
|
||||
if f.opt.UseKerberos {
|
||||
cl, err := getKerberosClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
spn := f.opt.SPN
|
||||
if spn == "" {
|
||||
spn = "cifs/" + f.opt.Host
|
||||
}
|
||||
|
||||
d.Initiator = &smb2.Krb5Initiator{
|
||||
Client: cl,
|
||||
TargetSPN: spn,
|
||||
}
|
||||
} else {
|
||||
d.Initiator = &smb2.NTLMInitiator{
|
||||
User: f.opt.User,
|
||||
Password: pass,
|
||||
Domain: f.opt.Domain,
|
||||
TargetSPN: f.opt.SPN,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
session, err := d.DialConn(ctx, tconn, addr)
|
||||
|
||||
backend/smb/kerberos.go (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
package smb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/jcmturner/gokrb5/v8/client"
|
||||
"github.com/jcmturner/gokrb5/v8/config"
|
||||
"github.com/jcmturner/gokrb5/v8/credentials"
|
||||
)
|
||||
|
||||
var (
|
||||
kerberosClient *client.Client
|
||||
kerberosErr error
|
||||
kerberosOnce sync.Once
|
||||
)
|
||||
|
||||
// getKerberosClient returns a Kerberos client that can be used to authenticate.
|
||||
func getKerberosClient() (*client.Client, error) {
|
||||
if kerberosClient == nil || kerberosErr == nil {
|
||||
kerberosOnce.Do(func() {
|
||||
kerberosClient, kerberosErr = createKerberosClient()
|
||||
})
|
||||
}
|
||||
|
||||
return kerberosClient, kerberosErr
|
||||
}
|
||||
|
||||
// createKerberosClient creates a new Kerberos client.
|
||||
func createKerberosClient() (*client.Client, error) {
|
||||
cfgPath := os.Getenv("KRB5_CONFIG")
|
||||
if cfgPath == "" {
|
||||
cfgPath = "/etc/krb5.conf"
|
||||
}
|
||||
|
||||
cfg, err := config.Load(cfgPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the ccache location from the environment, falling back to the
|
||||
// default location.
|
||||
ccachePath := os.Getenv("KRB5CCNAME")
|
||||
switch {
|
||||
case strings.Contains(ccachePath, ":"):
|
||||
parts := strings.SplitN(ccachePath, ":", 2)
|
||||
switch parts[0] {
|
||||
case "FILE":
|
||||
ccachePath = parts[1]
|
||||
case "DIR":
|
||||
primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
|
||||
}
|
||||
case ccachePath == "":
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ccachePath = "/tmp/krb5cc_" + u.Uid
|
||||
}
|
||||
|
||||
ccache, err := credentials.LoadCCache(ccachePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.NewFromCCache(ccache, cfg)
|
||||
}
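To show how the new helper is meant to be consumed, a rough usage sketch mirroring the dial() hunk earlier. It assumes the go-smb2 fork used here exposes `Initiator`, `NTLMInitiator` and `Krb5Initiator` as shown in that hunk, that this file's imports are available, and that `KRB5_CONFIG`/`KRB5CCNAME` point at a valid config and credential cache:

```go
// newInitiator picks the SMB authentication initiator - illustrative only.
func newInitiator(useKerberos bool, host, spn, user, pass, domain string) (smb2.Initiator, error) {
	if !useKerberos {
		return &smb2.NTLMInitiator{User: user, Password: pass, Domain: domain, TargetSPN: spn}, nil
	}
	cl, err := getKerberosClient() // reads KRB5_CONFIG and KRB5CCNAME as above
	if err != nil {
		return nil, err
	}
	if spn == "" {
		spn = "cifs/" + host // default service principal for an SMB host
	}
	return &smb2.Krb5Initiator{Client: cl, TargetSPN: spn}, nil
}
```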
|
||||
@@ -76,6 +76,16 @@ authentication, and it often needs to be set for clusters. For example:
|
||||
Leave blank if not sure.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "use_kerberos",
|
||||
Help: `Use Kerberos authentication.
|
||||
|
||||
If set, rclone will use Kerberos authentication instead of NTLM. This
|
||||
requires a valid Kerberos configuration and credentials cache to be
|
||||
available, either in the default locations or as specified by the
|
||||
KRB5_CONFIG and KRB5CCNAME environment variables.
|
||||
`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
@@ -126,6 +136,7 @@ type Options struct {
|
||||
Pass string `config:"pass"`
|
||||
Domain string `config:"domain"`
|
||||
SPN string `config:"spn"`
|
||||
UseKerberos bool `config:"use_kerberos"`
|
||||
HideSpecial bool `config:"hide_special_share"`
|
||||
CaseInsensitive bool `config:"case_insensitive"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
@@ -601,9 +612,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
|
||||
}
|
||||
|
||||
fi, err := cn.smbShare.Stat(reqDir)
|
||||
if err == nil {
|
||||
o.statResult = fi
|
||||
if err != nil {
|
||||
return fmt.Errorf("SetModTime: stat: %w", err)
|
||||
}
|
||||
o.statResult = fi
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -685,7 +697,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
o.statResult, _ = cn.smbShare.Stat(filename)
|
||||
o.fs.putConnection(&cn)
|
||||
}()
|
||||
|
||||
@@ -723,7 +734,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return fmt.Errorf("Update Close failed: %w", err)
|
||||
}
|
||||
|
||||
// Set the modified time
|
||||
// Set the modified time and also o.statResult
|
||||
err = o.SetModTime(ctx, src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update SetModTime failed: %w", err)
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
package smb_test
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/smb"
|
||||
@@ -15,3 +16,13 @@ func TestIntegration(t *testing.T) {
|
||||
NilObject: (*smb.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration2(t *testing.T) {
|
||||
krb5Dir := t.TempDir()
|
||||
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
|
||||
t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestSMBKerberos:rclone",
|
||||
NilObject: (*smb.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
@@ -867,13 +868,13 @@ func (f *Fs) Precision() time.Duration {
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
err := srcObj.readMetaData(ctx)
|
||||
err = srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -890,6 +891,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find and remove existing object
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
// Copy the object
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
||||
@@ -14,21 +14,30 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
|
||||
// Not found. Can be returned by NextCloud when merging chunks of an upload.
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
if *wasLocked {
|
||||
// Assume a 404 error after we've received a 423 error is actually a success
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// 423 LOCKED
|
||||
if resp != nil && resp.StatusCode == 423 {
|
||||
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
*wasLocked = true
|
||||
fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
|
||||
time.Sleep(*sleepTime)
|
||||
*sleepTime *= 2
|
||||
return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, resp, err)
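The two new pointer parameters let retry state survive between pacer attempts: the backoff doubles on each 423, and a later 404 is treated as success once a 423 has been seen. A minimal, hedged sketch of the same pattern outside rclone, with invented names:

```go
package main

import (
	"errors"
	"time"
)

// mergeWithRetry illustrates the 423/404 handling: status and err come from a
// caller-supplied attempt function.
func mergeWithRetry(do func() (status int, err error)) error {
	sleepTime := 5 * time.Second
	wasLocked := false
	for attempt := 0; attempt < 10; attempt++ {
		status, err := do()
		switch {
		case status == 423: // NextCloud is still merging the chunks
			wasLocked = true
			time.Sleep(sleepTime)
			sleepTime *= 2
		case status == 404 && wasLocked: // merge finished while we were waiting
			return nil
		default:
			return err
		}
	}
	return errors.New("chunk merge did not complete")
}
```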
|
||||
@@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
|
||||
}
|
||||
opts.ExtraHeaders = o.extraHeaders(ctx, src)
|
||||
opts.ExtraHeaders["Destination"] = destinationURL.String()
|
||||
sleepTime := 5 * time.Second
|
||||
wasLocked := false
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
|
||||
|
||||
@@ -161,7 +161,24 @@ Set to 0 to disable chunked uploading.
|
||||
Default: false,
|
||||
},
|
||||
fshttp.UnixSocketConfig,
|
||||
},
|
||||
{
|
||||
Name: "auth_redirect",
|
||||
Help: `Preserve authentication on redirect.
|
||||
|
||||
If the server redirects rclone to a new domain when it is trying to
|
||||
read a file then normally rclone will drop the Authorization: header
|
||||
from the request.
|
||||
|
||||
This is standard security practice to avoid sending your credentials
|
||||
to an unknown webserver.
|
||||
|
||||
However this is desirable in some circumstances. If you are getting
|
||||
an error like "401 Unauthorized" when rclone is attempting to read
|
||||
files from the webdav server then you can try this option.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}},
|
||||
})
|
||||
}
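For background, Go's net/http client only forwards sensitive headers such as `Authorization` when a redirect stays on the same host (or a subdomain), so a cross-domain redirect normally loses the credentials. A hedged sketch of the kind of `CheckRedirect` hook that re-attaches the header, which is roughly what this option asks rclone's HTTP layer to do (the real implementation lives in rclone's rest/fshttp packages, not here):

```go
package main

import (
	"errors"
	"net/http"
)

// authPreservingClient returns an http.Client that copies the Authorization
// header from the original request onto every redirected request.
func authPreservingClient() *http.Client {
	return &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			if auth := via[0].Header.Get("Authorization"); auth != "" {
				req.Header.Set("Authorization", auth)
			}
			return nil
		},
	}
}
```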
|
||||
|
||||
@@ -180,6 +197,7 @@ type Options struct {
|
||||
ExcludeShares bool `config:"owncloud_exclude_shares"`
|
||||
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
|
||||
UnixSocket string `config:"unix_socket"`
|
||||
AuthRedirect bool `config:"auth_redirect"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -1456,6 +1474,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": "0",
|
||||
},
|
||||
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
|
||||
@@ -22,13 +22,13 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// oAuth
|
||||
@@ -46,11 +46,9 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
|
||||
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
|
||||
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
@@ -713,7 +711,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
@@ -721,12 +719,21 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDirs(ctx, dstPath)
|
||||
err = f.mkParentDirs(ctx, dstPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
|
||||
|
||||
// Find and remove existing object
|
||||
//
|
||||
// Note that the overwrite flag doesn't seem to work for server side copy
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
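Both Copy implementations above switch to named returns (`dst fs.Object, err error`) so that the deferred `cleanup(&err)` can observe the final error and decide whether to restore the object it moved aside. A hedged, generic sketch of that pattern; `removeExisting` here is an invented stand-in for `operations.RemoveExisting`:

```go
// copyWithBackup shows why the named err return matters: the deferred cleanup
// reads *errp after the function body has finished.
func copyWithBackup(doCopy func() error,
	removeExisting func() (cleanup func(errp *error), err error)) (err error) {
	cleanup, err := removeExisting() // moves any existing destination out of the way
	if err != nil {
		return err
	}
	defer cleanup(&err) // restores the backup if err is non-nil, deletes it otherwise
	return doCopy()
}
```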
|
||||
|
||||
@@ -27,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// User is a Zoho user we are only interested in the ZUID here
|
||||
type User struct {
|
||||
// OAuthUser is a Zoho user we are only interested in the ZUID here
|
||||
type OAuthUser struct {
|
||||
FirstName string `json:"First_Name"`
|
||||
Email string `json:"Email"`
|
||||
LastName string `json:"Last_Name"`
|
||||
@@ -36,12 +36,41 @@ type User struct {
|
||||
ZUID int64 `json:"ZUID"`
|
||||
}
|
||||
|
||||
// TeamWorkspace represents a Zoho Team or workspace
|
||||
// UserInfoResponse is returned by the user info API.
|
||||
type UserInfoResponse struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"users"`
|
||||
Attributes struct {
|
||||
EmailID string `json:"email_id"`
|
||||
Edition string `json:"edition"`
|
||||
} `json:"attributes"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// PrivateSpaceInfo gives basic information about a users private folder.
|
||||
type PrivateSpaceInfo struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"string"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// CurrentTeamInfo gives information about the current user in a team.
|
||||
type CurrentTeamInfo struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"string"`
|
||||
}
|
||||
}
|
||||
|
||||
// TeamWorkspace represents a Zoho Team, Workspace or Private Space
|
||||
// It's actually a VERY large json object that differs between
|
||||
// Team and Workspace but we are only interested in some fields
|
||||
// that both of them have so we can use the same struct for both
|
||||
// Team and Workspace and Private Space but we are only interested in some fields
|
||||
// that all of them have so we can use the same struct.
|
||||
type TeamWorkspace struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Attributes struct {
|
||||
Name string `json:"name"`
|
||||
Created Time `json:"created_time_in_millisecond"`
|
||||
@@ -49,7 +78,8 @@ type TeamWorkspace struct {
|
||||
} `json:"attributes"`
|
||||
}
|
||||
|
||||
// TeamWorkspaceResponse is the response by the list teams api
|
||||
// TeamWorkspaceResponse is the response by the list teams API, list workspace API
|
||||
// or list team private spaces API.
|
||||
type TeamWorkspaceResponse struct {
|
||||
TeamWorkspace []TeamWorkspace `json:"data"`
|
||||
}
|
||||
@@ -180,11 +210,38 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
|
||||
return &ufi, nil
|
||||
}
|
||||
|
||||
// LargeUploadInfo is once again a slightly different version of UploadInfo
|
||||
// returned as part of an LargeUploadResponse by the large file upload API.
|
||||
type LargeUploadInfo struct {
|
||||
Attributes struct {
|
||||
ParentID string `json:"parent_id"`
|
||||
FileName string `json:"file_name"`
|
||||
RessourceID string `json:"resource_id"`
|
||||
FileInfo string `json:"file_info"`
|
||||
} `json:"attributes"`
|
||||
}
|
||||
|
||||
// GetUploadFileInfo decodes the embedded FileInfo
|
||||
func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
|
||||
var ufi UploadFileInfo
|
||||
err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
|
||||
}
|
||||
return &ufi, nil
|
||||
}
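`FileInfo` arrives as a JSON document embedded inside a string field of the outer JSON response, hence the second Unmarshal in `GetUploadFileInfo`. A small hedged sketch of that decode-twice pattern with invented field names:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type outerAttrs struct {
	FileInfo string `json:"file_info"` // a JSON document stored as a string
}

type innerFileInfo struct {
	Size int64 `json:"size"` // illustrative field only
}

// decodeEmbedded unmarshals the outer object, then the JSON held in FileInfo.
func decodeEmbedded(raw []byte) (*innerFileInfo, error) {
	var a outerAttrs
	if err := json.Unmarshal(raw, &a); err != nil {
		return nil, err
	}
	var fi innerFileInfo
	if err := json.Unmarshal([]byte(a.FileInfo), &fi); err != nil {
		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
	}
	return &fi, nil
}
```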
|
||||
|
||||
// UploadResponse is the response to a file Upload
|
||||
type UploadResponse struct {
|
||||
Uploads []UploadInfo `json:"data"`
|
||||
}
|
||||
|
||||
// LargeUploadResponse is the response returned by large file upload API.
|
||||
type LargeUploadResponse struct {
|
||||
Uploads []LargeUploadInfo `json:"data"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// WriteMetadataRequest is used to write metadata for a
|
||||
// single item
|
||||
type WriteMetadataRequest struct {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
@@ -36,31 +37,35 @@ const (
|
||||
rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
|
||||
rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
maxSleep = 60 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
configRootID = "root_folder_id"
|
||||
|
||||
defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"aaaserver.profile.read",
|
||||
"WorkDrive.team.READ",
|
||||
"WorkDrive.workspace.READ",
|
||||
"WorkDrive.files.ALL",
|
||||
"ZohoFiles.files.ALL",
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
|
||||
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
|
||||
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
|
||||
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
rootURL = "https://workdrive.zoho.eu/api/v1"
|
||||
downloadURL = "https://download.zoho.eu/v1/workdrive"
|
||||
uploadURL = "http://upload.zoho.eu/workdrive-api/v1/"
|
||||
accountsURL = "https://accounts.zoho.eu"
|
||||
)
|
||||
|
||||
@@ -79,7 +84,7 @@ func init() {
|
||||
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
|
||||
}
|
||||
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
|
||||
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
@@ -88,12 +93,12 @@ func init() {
|
||||
|
||||
switch config.State {
|
||||
case "":
|
||||
return oauthutil.ConfigOut("teams", &oauthutil.Options{
|
||||
return oauthutil.ConfigOut("type", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
// No refresh token unless ApprovalForce is set
|
||||
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
|
||||
})
|
||||
case "teams":
|
||||
case "type":
|
||||
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
|
||||
// its own custom type
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
@@ -108,24 +113,43 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
authSrv, apiSrv, err := getSrvs()
|
||||
_, apiSrv, err := getSrvs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the user Info
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/oauth/user/info",
|
||||
userInfo, err := getUserInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var user api.User
|
||||
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
|
||||
// If personal Edition only one private Space is available. Directly configure that.
|
||||
if userInfo.Data.Attributes.Edition == "PERSONAL" {
|
||||
return fs.ConfigResult("private_space", userInfo.Data.ID)
|
||||
}
|
||||
// Otherwise go to team selection
|
||||
return fs.ConfigResult("team", userInfo.Data.ID)
|
||||
case "private_space":
|
||||
_, apiSrv, err := getSrvs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
|
||||
workspace := workspaces[i]
|
||||
return workspace.ID, workspace.Name
|
||||
})
|
||||
case "team":
|
||||
_, apiSrv, err := getSrvs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the teams
|
||||
teams, err := listTeams(ctx, user.ZUID, apiSrv)
|
||||
teams, err := listTeams(ctx, config.Result, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -143,9 +167,19 @@ func init() {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workspaces = append(workspaces, privateSpaces...)
|
||||
|
||||
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
|
||||
workspace := workspaces[i]
|
||||
return workspace.ID, workspace.Attributes.Name
|
||||
return workspace.ID, workspace.Name
|
||||
})
|
||||
case "workspace_end":
|
||||
workspaceID := config.Result
|
||||
@@ -179,7 +213,13 @@ browser.`,
|
||||
}, {
|
||||
Value: "com.au",
|
||||
Help: "Australia",
|
||||
}}}, {
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to large file upload api (>= 10 MiB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
@@ -193,6 +233,7 @@ browser.`,
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
Region string `config:"region"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
@@ -200,13 +241,15 @@ type Options struct {
|
||||
|
||||
// Fs represents a remote workdrive
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
downloadsrv *rest.Client // the connection to the download server
|
||||
uploadsrv *rest.Client // the connection to the upload server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
}
|
||||
|
||||
// Object describes a Zoho WorkDrive object
|
||||
@@ -229,19 +272,73 @@ func setupRegion(m configmap.Mapper) error {
|
||||
return errors.New("no region set")
|
||||
}
|
||||
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
|
||||
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
|
||||
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
|
||||
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
|
||||
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
type workspaceInfo struct {
|
||||
ID string
|
||||
Name string
|
||||
}
|
||||
|
||||
func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
|
||||
var userInfo api.UserInfoResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/me",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &userInfo, nil
|
||||
}
|
||||
|
||||
func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
|
||||
var currentTeamInfo api.CurrentTeamInfo
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/teams/" + teamID + "/currentuser",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &currentTeamInfo, err
|
||||
}
|
||||
|
||||
func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
|
||||
var privateSpaceListResponse api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/" + teamUserID + "/privatespace",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
|
||||
for _, workspace := range privateSpaceListResponse.TeamWorkspace {
|
||||
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
|
||||
}
|
||||
return workspaceList, err
|
||||
}
|
||||
|
||||
func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
var teamList api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/" + strconv.FormatInt(uid, 10) + "/teams",
|
||||
Path: "/users/" + zuid + "/teams",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &teamList)
|
||||
@@ -251,18 +348,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWork
|
||||
return teamList.TeamWorkspace, nil
|
||||
}
|
||||
|
||||
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
var workspaceList api.TeamWorkspaceResponse
|
||||
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
|
||||
var workspaceListResponse api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/teams/" + teamID + "/workspaces",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return workspaceList.TeamWorkspace, nil
|
||||
|
||||
workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
|
||||
for _, workspace := range workspaceListResponse.TeamWorkspace {
|
||||
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
|
||||
}
|
||||
|
||||
return workspaceList, nil
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------
|
||||
@@ -285,13 +388,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
}
|
||||
authRetry := false
|
||||
|
||||
// Bail out early if we are missing OAuth Scopes.
|
||||
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
|
||||
fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
|
||||
return false, err
|
||||
}
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
if resp != nil && resp.StatusCode == 429 {
|
||||
fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
|
||||
time.Sleep(60 * time.Second)
|
||||
err = pacer.RetryAfterError(err, 60*time.Second)
|
||||
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
|
||||
return true, err
|
||||
}
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
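The change above stops blocking the goroutine with `time.Sleep` and instead hands the wait to the pacer via `pacer.RetryAfterError`. A hedged sketch of a call site using that mechanism; the helper and its arguments are invented for the example and assume this file's imports, but `fs.Pacer.Call`, `rest.Client.CallJSON` and `pacer.RetryAfterError` are the rclone primitives already used here:

```go
// callWithRateLimit retries a JSON call, deferring 60s on HTTP 429 responses.
func callWithRateLimit(ctx context.Context, p *fs.Pacer, srv *rest.Client, opts *rest.Opts, result any) error {
	return p.Call(func() (bool, error) {
		resp, err := srv.CallJSON(ctx, opts, nil, result)
		if resp != nil && resp.StatusCode == 429 {
			// Tell the pacer how long to wait before the next attempt.
			return true, pacer.RetryAfterError(err, 60*time.Second)
		}
		return shouldRetry(ctx, resp, err)
	})
}
```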
|
||||
@@ -389,6 +499,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err := configstruct.Set(m, opt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff < defaultUploadCutoff {
|
||||
return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
|
||||
}
|
||||
|
||||
err := setupRegion(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -401,11 +516,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
|
||||
uploadsrv: rest.NewClient(oAuthClient).SetRoot(uploadURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
@@ -643,9 +760,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/stream/upload",
|
||||
Body: in,
|
||||
ContentLength: &size,
|
||||
ContentType: "application/octet-stream",
|
||||
Options: options,
|
||||
ExtraHeaders: map[string]string{
|
||||
"x-filename": url.QueryEscape(name),
|
||||
"x-parent_id": parent,
|
||||
"override-name-exist": "true",
|
||||
"upload-id": uuid.New().String(),
|
||||
"x-streammode": "1",
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var uploadResponse *api.LargeUploadResponse
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("upload large error: %v", err)
|
||||
}
|
||||
if len(uploadResponse.Uploads) != 1 {
|
||||
return nil, errors.New("upload: invalid response")
|
||||
}
|
||||
upload := uploadResponse.Uploads[0]
|
||||
uploadInfo, err := upload.GetUploadFileInfo()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("upload error: %w", err)
|
||||
}
|
||||
|
||||
// Fill in the api.Item from the api.UploadFileInfo
|
||||
var info api.Item
|
||||
info.ID = upload.Attributes.RessourceID
|
||||
info.Attributes.Name = upload.Attributes.FileName
|
||||
// info.Attributes.Type = not used
|
||||
info.Attributes.IsFolder = false
|
||||
// info.Attributes.CreatedTime = not used
|
||||
info.Attributes.ModifiedTime = uploadInfo.GetModTime()
|
||||
// info.Attributes.UploadedTime = 0 not used
|
||||
info.Attributes.StorageInfo.Size = uploadInfo.Size
|
||||
info.Attributes.StorageInfo.FileCount = 0
|
||||
info.Attributes.StorageInfo.FolderCount = 0
|
||||
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
|
||||
params := url.Values{}
|
||||
params.Set("filename", name)
|
||||
params.Set("filename", url.QueryEscape(name))
|
||||
params.Set("parent_id", parent)
|
||||
params.Set("override-name-exist", strconv.FormatBool(true))
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
|
||||
@@ -705,21 +874,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
size := src.Size()
|
||||
remote := src.Remote()
|
||||
existingObj, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
size := src.Size()
|
||||
remote := src.Remote()
|
||||
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// use normal upload API for small sizes (<10MiB)
|
||||
if size < int64(f.opt.UploadCutoff) {
|
||||
info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
}
|
||||
|
||||
// large file API otherwise
|
||||
info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Upload the file
|
||||
info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
@@ -1159,7 +1347,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
resp, err = o.fs.downloadsrv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1183,11 +1371,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
// Overwrite the old file
|
||||
info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
// use normal upload API for small sizes (<10MiB)
|
||||
if size < int64(o.fs.opt.UploadCutoff) {
|
||||
info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
// large file API otherwise
|
||||
info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,8 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestZoho:",
|
||||
NilObject: (*zoho.Object)(nil),
|
||||
RemoteName: "TestZoho:",
|
||||
SkipInvalidUTF8: true,
|
||||
NilObject: (*zoho.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
|
||||
continue
|
||||
fi
|
||||
|
||||
commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
|
||||
commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
|
||||
if [ "$commit" == "" ]; then
|
||||
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
|
||||
commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
|
||||
fi
|
||||
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
|
||||
version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
|
||||
echo $backend $version
|
||||
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
|
||||
done
|
||||
|
||||
@@ -21,12 +21,12 @@ def find_backends():
|
||||
def output_docs(backend, out, cwd):
|
||||
"""Output documentation for backend options to out"""
|
||||
out.flush()
|
||||
subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
|
||||
subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)
|
||||
|
||||
def output_backend_tool_docs(backend, out, cwd):
|
||||
"""Output documentation for backend tool to out"""
|
||||
out.flush()
|
||||
subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
|
||||
subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
|
||||
|
||||
def alter_doc(backend):
|
||||
"""Alter the documentation for backend"""
|
||||
|
||||
@@ -7,6 +7,7 @@ conversion into man pages etc.
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
|
||||
docpath = "docs/content"
|
||||
@@ -35,6 +36,7 @@ docs = [
|
||||
"box.md",
|
||||
"cache.md",
|
||||
"chunker.md",
|
||||
"cloudinary.md",
|
||||
"sharefile.md",
|
||||
"crypt.md",
|
||||
"compress.md",
|
||||
@@ -52,6 +54,7 @@ docs = [
|
||||
"hidrive.md",
|
||||
"http.md",
|
||||
"imagekit.md",
|
||||
"iclouddrive.md",
|
||||
"internetarchive.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
@@ -190,13 +193,23 @@ def main():
|
||||
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
|
||||
build_date = datetime.utcfromtimestamp(
|
||||
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
|
||||
help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
|
||||
with open(outfile, "w") as out:
|
||||
out.write("""\
|
||||
%% rclone(1) User Manual
|
||||
%% Nick Craig-Wood
|
||||
%% %s
|
||||
|
||||
""" % build_date.strftime("%b %d, %Y"))
|
||||
# NAME
|
||||
|
||||
rclone - manage files on cloud storage
|
||||
|
||||
# SYNOPSIS
|
||||
|
||||
```
|
||||
%s
|
||||
```
|
||||
""" % (build_date.strftime("%b %d, %Y"), help_output))
|
||||
for doc in docs:
|
||||
contents = read_doc(doc)
|
||||
# Substitute the commands into doc.md
|
||||
|
||||
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
    cmd := exec.Command("git", "log", "--oneline", from+".."+to)
    out, err := cmd.Output()
    if err != nil {
        log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
        log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
    }
    logMap = map[string]string{}
    logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
        }
        match := logRe.FindSubmatch(line)
        if match == nil {
            log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
            log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
        }
        var hash, logMessage = string(match[1]), string(match[2])
        logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
    flag.Parse()
    args := flag.Args()
    if len(args) != 0 {
        log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
        log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
    }
    // v1.54.0
    versionBytes, err := os.ReadFile("VERSION")
    if err != nil {
        log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
        log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
    }
    if versionBytes[0] == 'v' {
        versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
    versionBytes = bytes.TrimSpace(versionBytes)
    semver := semver.New(string(versionBytes))
    stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
    log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
    log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
    masterMap, masterLogs := readCommits(stable+".0", "master")
    stableMap, _ := readCommits(stable+".0", stable+"-stable")
    for _, logMessage := range masterLogs {

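The tool above builds a message-to-hash map from `git log --oneline` for both master and the stable branch, then reports commits whose messages appear in master but not in stable (cherry-pick candidates). The same idea sketched in Python, for illustration only (the real tool is the Go code above; the version strings in the comments are examples):

```python
import re
import subprocess

log_re = re.compile(r"^([0-9a-f]{4,}) (.*)$")

def read_commits(commit_range):
    # Map "commit message" -> short hash for every commit in the range.
    out = subprocess.check_output(["git", "log", "--oneline", commit_range], text=True)
    commits = {}
    for line in out.splitlines():
        match = log_re.match(line)
        if match:
            commits[match.group(2)] = match.group(1)
    return commits

# e.g. messages in master since a release tag that never reached the stable branch:
# master = read_commits("v1.68.0..master")
# stable = read_commits("v1.68.0..v1.68-stable")
# missing = [msg for msg in master if msg not in stable]
```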
@@ -7,15 +7,18 @@ Run with no arguments to test all backends or a supply a list of
backends to test.
"""

import os
import re
import sys
import subprocess

all_backends = "backend/all/all.go"

# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

import os
import re
import sys
import subprocess
# disable CGO as that makes a lot of difference to binary size
os.environ["CGO_ENABLED"]="0"

match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')

@@ -43,6 +46,9 @@ def write_all(orig_all, backend):
        # Comment out line matching backend
        if match and match.group(1) == backend:
            line = "// " + line
        # s3 and pikpak depend on each other
        if backend == "s3" and "pikpak" in line:
            line = "// " + line
        fd.write(line+"\n")

def compile():

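The `write_all` change comments one backend's import out of backend/all/all.go (plus pikpak when s3 is dropped, since the two depend on each other), and the script then rebuilds to see how much that backend contributes to the binary. A rough sketch of the rebuild-and-measure step under the same `CGO_ENABLED=0` setting (illustrative only; `build_and_measure` is not a function from the script):

```python
import os
import subprocess

os.environ["CGO_ENABLED"] = "0"  # disable CGO so binary sizes stay comparable

def build_and_measure(output="rclone-test-binary"):
    # Build with roughly production-like flags and return the binary size in bytes.
    subprocess.check_call(["go", "build", "--ldflags", "-s", "-trimpath", "-o", output])
    return os.path.getsize(output)
```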
@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
    exit 1
fi
VERSION="$1"
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')

cat > "/tmp/${VERSION}-release-notes" <<EOF
This is the ${VERSION} release of rclone.

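The ANCHOR pipeline (changed only to spell `head -n 1` explicitly) turns the newest `## v...` changelog heading into a URL fragment: strip the `## `, replace every character outside `A-Za-z0-9-` with `-`, then collapse runs of dashes. The same slugging step in Python, purely for illustration (`changelog_anchor` is not a real helper in the repo, and the example heading in the comment is made up):

```python
import re

def changelog_anchor(heading):
    # e.g. "## v1.68.0 - 2024-09-08" -> "v1-68-0-2024-09-08"
    slug = heading.removeprefix("## ")
    slug = re.sub(r"[^A-Za-z0-9-]", "-", slug)
    return re.sub(r"--+", "-", slug)
```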
@@ -5,20 +5,13 @@ import (
    "bytes"
    "log"

    "github.com/rclone/rclone/fs"
    "github.com/sirupsen/logrus"
)

// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
    logSave := log.Writer()
    logrusSave := logrus.StandardLogger().Writer()
    defer func() {
        err := logrusSave.Close()
        if err != nil {
            fs.Errorf(nil, "error closing logrusSave: %v", err)
        }
    }()
    logrusSave := logrus.StandardLogger().Out
    buf := &bytes.Buffer{}
    log.SetOutput(buf)
    logrus.SetOutput(buf)

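CaptureOutput works by saving the current log writers, pointing both the standard library logger and logrus at an in-memory buffer, and restoring the originals afterwards. The same capture-and-restore idea expressed in Python, shown only as an analogy (this is not rclone code):

```python
import io
import logging

def capture_output(fun):
    # Route the root logger to an in-memory buffer while fun() runs, then restore it.
    buf = io.StringIO()
    handler = logging.StreamHandler(buf)
    root = logging.getLogger()
    root.addHandler(handler)
    try:
        fun()
    finally:
        root.removeHandler(handler)
    return buf.getvalue()
```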
@@ -9,7 +9,7 @@ import (
    "github.com/rclone/rclone/cmd/bisync/bilib"
    "github.com/rclone/rclone/fs"
    "github.com/stretchr/testify/assert"
    "gopkg.in/yaml.v2"
    "gopkg.in/yaml.v3"
)

const configFile = "../../fstest/test_all/config.yaml"

@@ -15,6 +15,7 @@ import (
    "path/filepath"
    "regexp"
    "runtime"
    "slices"
    "sort"
    "strconv"
    "strings"
@@ -207,15 +208,16 @@ type bisyncTest struct {
    parent1 fs.Fs
    parent2 fs.Fs
    // global flags
    argRemote1    string
    argRemote2    string
    noCompare     bool
    noCleanup     bool
    golden        bool
    debug         bool
    stopAt        int
    TestFn        bisync.TestFunc
    ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
    argRemote1      string
    argRemote2      string
    noCompare       bool
    noCleanup       bool
    golden          bool
    debug           bool
    stopAt          int
    TestFn          bisync.TestFunc
    ignoreModtime   bool // ignore modtimes when comparing final listings, for backends without support
    ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank
}

var color = bisync.Color
@@ -946,6 +948,10 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
    if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") {
        b.t.Skip("skipping test as remote does not support empty dirs")
    }
    ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank
    if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) {
        b.ignoreBlankHash = true
    }
    if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported {
        if b.testCase != "nomodtime" {
            b.t.Skip("skipping test as at least one remote does not support setting modtime")
@@ -1551,6 +1557,12 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
    if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) {
        logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe)
    }
    if b.ignoreBlankHash {
        logReplacements = append(logReplacements,
            `^.*hash is missing.*$`, dropMe,
            `^.*not equal on recheck.*$`, dropMe,
        )
    }
    rep := logReplacements
    if b.testCase == "dry_run" {
        rep = append(rep, dryrunReplacements...)

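The mangleResult additions scrub the captured bisync log before comparing it against the golden file: when blank hashes are allowed, lines matching `hash is missing` or `not equal on recheck` are dropped so they cannot cause spurious diffs. A compact sketch of that normalisation idea in Python (illustrative only; the real tests do this in Go, and `normalise` is a made-up helper):

```python
import re

# (pattern, replacement) pairs; an empty replacement drops the matching line.
replacements = [
    (r"^.*hash is missing.*$", ""),
    (r"^.*not equal on recheck.*$", ""),
]

def normalise(log_text):
    lines = []
    for line in log_text.splitlines():
        for pattern, repl in replacements:
            line = re.sub(pattern, repl, line)
        if line:
            lines.append(line)
    return "\n".join(lines)
```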