Mirror of https://github.com/rclone/rclone.git, synced 2025-12-21 10:43:37 +00:00

Compare commits: v1.60.1...fix-azureb (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | c726ae3afb | |

.github/workflows/build.yml (vendored): 97 lines changed
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.16', 'go1.17']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -39,16 +39,9 @@ jobs:
             librclonetest: true
             deploy: true

-          - job_name: linux_386
-            os: ubuntu-latest
-            go: '1.19.x'
-            goarch: 386
-            gotags: cmount
-            quicktest: true
-
           - job_name: mac_amd64
             os: macos-11
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -57,52 +50,63 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

-          - job_name: windows
+          - job_name: windows_amd64
             os: windows-latest
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: cmount
-            cgo: '0'
-            build_flags: '-include "^windows/"'
+            build_flags: '-include "^windows/amd64" -cgo'
+            build_args: '-buildmode exe'
+            quicktest: true
+            racequicktest: true
+            deploy: true
+
+          - job_name: windows_386
+            os: windows-latest
+            go: '1.18.x'
+            gotags: cmount
+            goarch: '386'
+            cgo: '1'
+            build_flags: '-include "^windows/386" -cgo'
             build_args: '-buildmode exe'
             quicktest: true
             deploy: true

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.19.x'
-            build_flags: '-exclude "^(windows/|darwin/|linux/)"'
+            go: '1.18.x'
+            build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
             compile_all: true
             deploy: true

+          - job_name: go1.16
+            os: ubuntu-latest
+            go: '1.16.x'
+            quicktest: true
+            racequicktest: true
+
           - job_name: go1.17
             os: ubuntu-latest
             go: '1.17.x'
             quicktest: true
             racequicktest: true

-          - job_name: go1.18
-            os: ubuntu-latest
-            go: '1.18.x'
-            quicktest: true
-            racequicktest: true
-
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0

       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
         with:
           stable: 'false'
           go-version: ${{ matrix.go }}
@@ -162,7 +166,7 @@ jobs:
           env

       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -174,11 +178,6 @@ jobs:
         run: |
           make

-      - name: Rclone version
-        shell: bash
-        run: |
-          rclone version
-
       - name: Run tests
         shell: bash
         run: |
@@ -226,10 +225,10 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

       - name: Code quality test
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v2
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
@@ -242,18 +241,22 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0

       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v1
         with:
-          go-version: 1.19.x
+          go-version: 1.18.x

+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
+
       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -274,29 +277,27 @@ jobs:
           go install golang.org/x/mobile/cmd/gobind@latest
           go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
-          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .

       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -304,12 +305,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .

       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -317,12 +318,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .

       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -330,7 +331,7 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

       - name: Upload artifacts
         run: |
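The Android steps above drive a plain `go build` cross-compile purely through environment variables: GOOS/GOARCH/GOARM select the target, CGO_ENABLED=1 turns on cgo, and CC points at an NDK clang. A minimal sketch of such a build; the invocation in the comment mirrors the workflow and the NDK path is illustrative, not authoritative:

```go
package main

import (
	"fmt"
	"runtime"
)

// Built roughly as the workflow does it, e.g.:
//
//	GOOS=android GOARCH=arm GOARM=7 CGO_ENABLED=1 \
//	CC=$ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang \
//	go build -trimpath -o build/rclone-android-16-armv7a .
//
// The resulting binary reports the platform it was compiled for.
func main() {
	fmt.Printf("%s/%s\n", runtime.GOOS, runtime.GOARCH) // e.g. android/arm
}
```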
@@ -12,7 +12,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish image
@@ -20,7 +20,7 @@ jobs:
         with:
           tag: beta
           imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
           publish: true
           dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
           dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -28,7 +28,7 @@ jobs:
         with:
           tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
           imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
           publish: true
           dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
           dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
@@ -50,7 +50,7 @@ jobs:
           PLUGIN_USER=rclone
           docker login --username ${{ secrets.DOCKER_HUB_USER }} \
               --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
             export PLUGIN_USER PLUGIN_ARCH
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
             make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
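In the plugin loop above, `${PLUGIN_ARCH/\//-}` is bash parameter expansion that swaps the slash in names like `arm/v7` for a dash so the result is usable as a Docker tag. A small Go sketch of the same transformation, with the architecture list taken from the loop:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the bash substitution ${PLUGIN_ARCH/\//-} from the workflow.
	for _, arch := range []string{"amd64", "arm64", "arm/v7", "arm/v6"} {
		fmt.Println(strings.ReplaceAll(arch, "/", "-")) // amd64, arm64, arm-v7, arm-v6
	}
}
```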
@@ -20,7 +20,7 @@ issues:
   exclude-use-default: false

   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-issues-per-linter: 0
+  max-per-linter: 0

   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
@@ -77,7 +77,7 @@ Make sure you
 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

-When you are done with that push your changes to GitHub:
+When you are done with that push your changes to Github:

     git push -u origin my-new-feature

@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

-## Using Git and GitHub ##
+## Using Git and Github ##

 ### Committing your changes ###

MANUAL.html (generated): 7060 lines changed. File diff suppressed because it is too large.

MANUAL.txt (generated): 7649 lines changed. File diff suppressed because it is too large.
Makefile: 10 lines changed

@@ -97,7 +97,7 @@ release_dep_linux:

 # Get the release dependencies we only install on Windows
 release_dep_windows:
-	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
+	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo

 # Update dependencies
 showupdates:
@@ -245,18 +245,18 @@ retag:
 startdev:
 	@echo "Version is $(VERSION)"
 	@echo "Next version is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
 	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

 startstable:
 	@echo "Version is $(VERSION)"
 	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
-	echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
 	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_PATCH_VERSION)" > VERSION
-	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
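The `startdev` and `startstable` recipes generate a small Go source file by piping `echo -e` output through gofmt. On the fix-azureb side the generated `fs/version.go` would look like the sketch below; the version string is illustrative:

```go
package fs

// Version of rclone
var Version = "v1.54.0-DEV"
```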
README.md: 25 lines changed

@@ -29,7 +29,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
 * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
-* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
 * Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
@@ -42,13 +41,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
-* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
-* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+* Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -62,20 +59,17 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
 * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
-* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
-* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
-* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * Storj [:page_facing_up:](https://rclone.org/storj/)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -88,19 +82,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and

 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

-### Virtual storage providers
-
-These backends adapt or modify other storage providers
-
-* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
-* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
-* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
-* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
-* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
-* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
-* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
-* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
-
 ## Features

 * MD5/SHA-1 hashes checked at all times for file integrity
@@ -115,7 +96,7 @@ These backends adapt or modify other storage providers
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
 * Multi-threaded downloads to local disk
-* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
+* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

 ## Installation & documentation
@@ -136,5 +117,5 @@ Please see the [rclone website](https://rclone.org/) for:
 License
 -------

-This is free software under the terms of the MIT license (check the
+This is free software under the terms of MIT the license (check the
 [COPYING file](/COPYING) included in this package).
@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.

-## Tidy beta
-
-At some point after the release run
-
-    bin/tidy-beta v1.55
-
-where the version number is that of a couple ago to remove old beta binaries.
-
 ## Making a point release

 If rclone needs a point release due to some horrendous bug:
@@ -1,4 +1,3 @@
-// Package alias implements a virtual provider to rename existing remotes.
 package alias

 import (
@@ -1,4 +1,3 @@
-// Package all imports all the backends
 package all

 import (
@@ -10,7 +9,6 @@ import (
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
@@ -22,8 +20,8 @@ import (
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"
 	_ "github.com/rclone/rclone/backend/hdfs"
-	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/hubic"
 	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
@@ -34,7 +32,6 @@ import (
 	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/putio"
@@ -44,7 +41,6 @@ import (
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
-	_ "github.com/rclone/rclone/backend/smb"
 	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
 		query += " AND kind:" + folderKind
 	} else if filesOnly {
 		query += " AND kind:" + fileKind
-		//} else {
+	} else {
 		// FIXME none of these work
 		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
 		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // This is a workaround for Amazon sometimes returning
 //
-//   - 408 REQUEST_TIMEOUT
-//   - 504 GATEWAY_TIMEOUT
-//   - 500 Internal server error
+// * 408 REQUEST_TIMEOUT
+// * 504 GATEWAY_TIMEOUT
+// * 500 Internal server error
 //
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,

 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1002,6 +1002,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1,7 +1,8 @@
+// Package azureblob provides an interface to the Microsoft Azure blob object storage system
+
 //go:build !plan9 && !solaris && !js
 // +build !plan9,!solaris,!js

-// Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

 import (
@@ -25,7 +26,6 @@ import (
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/Azure/go-autorest/autorest/adal"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
@@ -372,9 +372,15 @@ func (o *Object) split() (container, containerPath string) {

 // validateAccessTier checks if azureblob supports user supplied tier
 func validateAccessTier(tier string) bool {
-	return strings.EqualFold(tier, string(azblob.AccessTierHot)) ||
-		strings.EqualFold(tier, string(azblob.AccessTierCool)) ||
-		strings.EqualFold(tier, string(azblob.AccessTierArchive))
+	switch tier {
+	case string(azblob.AccessTierHot),
+		string(azblob.AccessTierCool),
+		string(azblob.AccessTierArchive):
+		// valid cases
+		return true
+	default:
+		return false
+	}
 }

 // validatePublicAccess checks if azureblob supports use supplied public access level
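The two implementations differ in case handling: `azblob.AccessTierHot` is the exact string "Hot", so the switch on the fix-azureb side accepts only exactly-cased tier names, while the `strings.EqualFold` version on the v1.60.1 side also accepts "hot" or "HOT" (the table test deleted further down exercises precisely those cases). A self-contained sketch of the case-insensitive check, using plain strings in place of the azblob constants:

```go
package main

import (
	"fmt"
	"strings"
)

// validTiers stands in for the three azblob access tier constants.
var validTiers = []string{"Hot", "Cool", "Archive"}

// validateAccessTier reports whether tier names a known tier, ignoring
// case, matching the strings.EqualFold variant above.
func validateAccessTier(tier string) bool {
	for _, t := range validTiers {
		if strings.EqualFold(tier, t) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validateAccessTier("HOT"))     // true
	fmt.Println(validateAccessTier("archive")) // true
	fmt.Println(validateAccessTier("unknown")) // false
}
```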
@@ -538,10 +544,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, fmt.Errorf("chunk size: %w", err)
+		return nil, fmt.Errorf("azure: chunk size: %w", err)
 	}
 	if opt.ListChunkSize > maxListChunkSize {
-		return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+		return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
 	}
 	if opt.Endpoint == "" {
 		opt.Endpoint = storageDefaultBaseURL
@@ -550,12 +556,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.AccessTier == "" {
 		opt.AccessTier = string(defaultAccessTier)
 	} else if !validateAccessTier(opt.AccessTier) {
-		return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}

 	if !validatePublicAccess((opt.PublicAccess)) {
-		return nil, fmt.Errorf("supported public access level are %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
 			string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
 	}
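These hunks, and many below, flip error messages between lowercase and capitalized first letters. The v1.60.1 side follows the Go convention checked by staticcheck (error strings should not be capitalized or end with punctuation), since errors are routinely wrapped into longer messages. A one-file illustration of why the lowercase form composes better:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("no authentication method configured")
	// Wrapped mid-sentence, the lowercase string reads naturally:
	fmt.Println(fmt.Errorf("azureblob: NewFs: %w", err))
	// Output: azureblob: NewFs: no authentication method configured
}
```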
@@ -597,13 +603,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.UseEmulator:
 		credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse credentials: %w", err)
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}
-		var actualEmulatorEndpoint = emulatorBlobEndpoint
-		if opt.Endpoint != "" {
-			actualEmulatorEndpoint = opt.Endpoint
-		}
-		u, err = url.Parse(actualEmulatorEndpoint)
+		u, err = url.Parse(emulatorBlobEndpoint)
 		if err != nil {
 			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
@@ -647,7 +649,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		})

 		if err != nil {
-			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
+			return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
 		}

 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
@@ -682,7 +684,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.Account != "" && opt.Key != "":
 		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse credentials: %w", err)
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}

 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
@@ -702,7 +704,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		parts := azblob.NewBlobURLParts(*u)
 		if parts.ContainerName != "" {
 			if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
-				return nil, errors.New("container name in SAS URL and container provided in command do not match")
+				return nil, errors.New("Container name in SAS URL and container provided in command do not match")
 			}
 			containerURL := azblob.NewContainerURL(*u, pipeline)
 			f.cntURLcache[parts.ContainerName] = &containerURL
@@ -730,7 +732,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
 		serviceURL = azblob.NewServiceURL(*u, pipe)
 	default:
-		return nil, errors.New("no authentication method configured")
+		return nil, errors.New("No authentication method configured")
 	}
 	f.svcURL = &serviceURL

@@ -1114,7 +1116,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {

 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1246,9 +1248,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1296,6 +1298,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return f.NewObject(ctx, remote)
 }

+func (f *Fs) getMemoryPool(size int64) *pool.Pool {
+	if size == int64(f.opt.ChunkSize) {
+		return f.pool
+	}
+
+	return pool.New(
+		time.Duration(f.opt.MemoryPoolFlushTime),
+		int(size),
+		f.ci.Transfers,
+		f.opt.MemoryPoolUseMmap,
+	)
+}
+
 // ------------------------------------------------------------

 // Fs returns the parent Fs
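getMemoryPool hands back the Fs-wide buffer pool when the requested size matches the configured chunk size and otherwise constructs a throwaway pool with the right buffer size. A sketch of how such a pool is consumed; it assumes the Get/Put methods of rclone's lib/pool package in addition to the pool.New constructor visible in the hunk:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	// 1 MiB buffers, at most 4 cached, flushed after a minute idle, no
	// mmap - the same shape of call as pool.New in the diff above.
	bp := pool.New(time.Minute, 1024*1024, 4, false)
	buf := bp.Get() // borrow a buffer of the configured size
	fmt.Println(len(buf))
	bp.Put(buf) // return it so the next upload part reuses the memory
}
```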
@@ -1327,7 +1342,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	data, err := base64.StdEncoding.DecodeString(o.md5)
 	if err != nil {
-		return "", fmt.Errorf("failed to decode Content-MD5: %q: %w", o.md5, err)
+		return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
 	}
 	return hex.EncodeToString(data), nil
 }
@@ -1355,7 +1370,6 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
 // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
 //
 // Sets
-//
 //	o.id
 //	o.modTime
 //	o.size
@@ -1443,7 +1457,6 @@ func (o *Object) clearMetaData() {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-//
 //	o.id
 //	o.modTime
 //	o.size
@@ -1519,7 +1532,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var offset int64
 	var count int64
 	if o.AccessTier() == azblob.AccessTierArchive {
-		return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
+		return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
 	}
 	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
@@ -1677,17 +1690,25 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}

-	uploadParts := maxUploadParts
+	// calculate size of parts/blocks
+	partSize := int(o.fs.opt.ChunkSize)
+
+	uploadParts := int64(maxUploadParts)
 	if uploadParts < 1 {
 		uploadParts = 1
 	} else if uploadParts > maxUploadParts {
 		uploadParts = maxUploadParts
 	}
-	// calculate size of parts/blocks
-	partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)

+	// Adjust partSize until the number of parts/blocks is small enough.
+	if o.size/int64(partSize) >= uploadParts {
+		// Calculate partition size rounded up to the nearest MiB
+		partSize = int((((o.size / uploadParts) >> 20) + 1) << 20)
+		fs.Debugf(o, "Adjust partSize to %q", partSize)
+	}
+
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize:      int(partSize),
+		BufferSize:      partSize,
 		MaxBuffers:      o.fs.opt.UploadConcurrency,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
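The fallback on the fix-azureb side sizes parts by rounding `size / uploadParts` up to a whole MiB with shifts: `>> 20` converts bytes to MiB, `+ 1` rounds up, `<< 20` converts back to bytes. A runnable check of that arithmetic; the object size is illustrative and 50000 is Azure's block-count limit per block blob:

```go
package main

import "fmt"

func main() {
	const uploadParts = 50000  // Azure block blobs allow at most 50,000 blocks
	var size int64 = 400 << 30 // a 400 GiB object, for illustration

	// Round (size / uploadParts) up to the next whole MiB, as in the diff.
	partSize := int((((size / uploadParts) >> 20) + 1) << 20)
	fmt.Println(partSize, partSize>>20) // 9437184 9 (i.e. 9 MiB per part)
}
```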
@@ -1744,7 +1765,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
 // SetTier performs changing object tier
 func (o *Object) SetTier(tier string) error {
 	if !validateAccessTier(tier) {
-		return fmt.Errorf("tier %s not supported by Azure Blob Storage", tier)
+		return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
 	}

 	// Check if current tier already matches with desired tier
@@ -1755,12 +1776,12 @@ func (o *Object) SetTier(tier string) error {
 	blob := o.getBlobReference()
 	ctx := context.Background()
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{}, azblob.RehydratePriorityStandard)
+		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{}, azblob.RehydratePriorityNone)
 		return o.fs.shouldRetry(ctx, err)
 	})

 	if err != nil {
-		return fmt.Errorf("failed to set Blob Tier: %w", err)
+		return fmt.Errorf("Failed to set Blob Tier: %w", err)
 	}

 	// Set access tier on local object also, this typically
@@ -61,25 +61,3 @@ func TestServicePrincipalFileFailure(t *testing.T) {
 	assert.Error(t, err)
 	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
 }
-
-func TestValidateAccessTier(t *testing.T) {
-	tests := map[string]struct {
-		accessTier string
-		want       bool
-	}{
-		"hot":     {"hot", true},
-		"HOT":     {"HOT", true},
-		"Hot":     {"Hot", true},
-		"cool":    {"cool", true},
-		"archive": {"archive", true},
-		"empty":   {"", false},
-		"unknown": {"unknown", false},
-	}
-
-	for name, test := range tests {
-		t.Run(name, func(t *testing.T) {
-			got := validateAccessTier(test.accessTier)
-			assert.Equal(t, test.want, got)
-		})
-	}
-}
@@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {

 	b, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return result, fmt.Errorf("couldn't read IMDS response: %w", err)
+		return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
 	}
 	// Remove BOM, if any. azcopy does this so I'm following along.
 	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	// storage API call.
 	err = json.Unmarshal(b, &result)
 	if err != nil {
-		return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
+		return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
 	}

 	return result, nil
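The BOM handling above matters because encoding/json refuses a UTF-8 byte-order mark ahead of the opening brace. A self-contained demonstration:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// An IMDS-style JSON body prefixed with the UTF-8 BOM (EF BB BF).
	body := append([]byte("\xef\xbb\xbf"), []byte(`{"access_token":"x"}`)...)

	var v map[string]string
	fmt.Println(json.Unmarshal(body, &v)) // fails: invalid character before the value

	trimmed := bytes.TrimPrefix(body, []byte("\xef\xbb\xbf"))
	fmt.Println(json.Unmarshal(trimmed, &v), v["access_token"]) // <nil> x
}
```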
@@ -1,4 +1,3 @@
-// Package api provides types used by the Backblaze B2 API.
 package api

 import (
@@ -239,7 +238,7 @@ type GetFileInfoRequest struct {
 	// If the original source of the file being uploaded has a last
 	// modified time concept, Backblaze recommends using
 	// src_last_modified_millis as the name, and a string holding the base
-	// 10 number of milliseconds since midnight, January 1, 1970
+	// 10 number number of milliseconds since midnight, January 1, 1970
 	// UTC. This fits in a 64 bit integer such as the type "long" in the
 	// programming language Java. It is intended to be compatible with
 	// Java's time long. For example, it can be passed directly into the
@@ -1,4 +1,4 @@
|
|||||||
// Package b2 provides an interface to the Backblaze B2 object storage system.
|
// Package b2 provides an interface to the Backblaze B2 object storage system
|
||||||
package b2
|
package b2
|
||||||
|
|
||||||
// FIXME should we remove sha1 checks from here as rclone now supports
|
// FIXME should we remove sha1 checks from here as rclone now supports
|
||||||
@@ -65,7 +65,6 @@ const (
|
|||||||
// Globals
|
// Globals
|
||||||
var (
|
var (
|
||||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||||
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
@@ -107,11 +106,6 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
|
|||||||
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "version_at",
|
|
||||||
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
|
|
||||||
Default: fs.Time{},
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "hard_delete",
|
Name: "hard_delete",
|
||||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||||
@@ -217,7 +211,6 @@ type Options struct {
|
|||||||
Endpoint string `config:"endpoint"`
|
Endpoint string `config:"endpoint"`
|
||||||
TestMode string `config:"test_mode"`
|
TestMode string `config:"test_mode"`
|
||||||
Versions bool `config:"versions"`
|
Versions bool `config:"versions"`
|
||||||
VersionAt fs.Time `config:"version_at"`
|
|
||||||
HardDelete bool `config:"hard_delete"`
|
HardDelete bool `config:"hard_delete"`
|
||||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||||
@@ -280,7 +273,7 @@ func (f *Fs) Root() string {
 // String converts this Fs to a string
 func (f *Fs) String() string {
     if f.rootBucket == "" {
-        return "B2 root"
+        return fmt.Sprintf("B2 root")
     }
     if f.rootDirectory == "" {
         return fmt.Sprintf("B2 bucket %s", f.rootBucket)
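The changed line shows why v1.60.1 dropped fmt.Sprintf here: with no format verbs and no arguments it adds nothing but an allocation, and linters such as staticcheck report the pattern as an unnecessary use of fmt.Sprintf. A compilable illustration (the bucket name is made up):

    package main

    import "fmt"

    func main() {
        rootBucket := "my-bucket"

        a := fmt.Sprintf("B2 root") // no verbs, no args: flagged by linters
        b := "B2 root"              // the literal is equivalent and allocation-free
        c := fmt.Sprintf("B2 bucket %s", rootBucket) // Sprintf earns its keep once verbs appear

        fmt.Println(a == b, c) // true B2 bucket my-bucket
    }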
@@ -656,15 +649,15 @@ var errEndList = errors.New("end list")
 //
 // (bucket, directory) is the starting directory
 //
-// If prefix is set then it is removed from all file names.
+// If prefix is set then it is removed from all file names
 //
 // If addBucket is set then it adds the bucket to the start of the
-// remotes generated.
+// remotes generated
 //
-// If recurse is set the function will recursively list.
+// If recurse is set the function will recursively list
 //
 // If limit is > 0 then it limits to that many files (must be less
-// than 1000).
+// than 1000)
 //
 // If hidden is set then it will list the hidden (deleted) files too.
 //
@@ -703,12 +696,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
     Method: "POST",
     Path:   "/b2_list_file_names",
 }
-if hidden || f.opt.VersionAt.IsSet() {
+if hidden {
     opts.Path = "/b2_list_file_versions"
 }
-
-lastFileName := ""
-
 for {
     var response api.ListFileNamesResponse
     err := f.pacer.Call(func() (bool, error) {
@@ -738,21 +728,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
         if addBucket {
             remote = path.Join(bucket, remote)
         }
-
-        if f.opt.VersionAt.IsSet() {
-            if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
-                // Ignore versions that were created after the specified time
-                continue
-            }
-
-            if file.Name == lastFileName {
-                // Ignore versions before the already returned version
-                continue
-            }
-        }
-
         // Send object
-        lastFileName = file.Name
         err = fn(remote, file, isDirectory)
         if err != nil {
             if err == errEndList {
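The hunk above removes the --b2-version-at filtering from the listing loop (the feature does not exist on the fix-azureb side). The v1.60.1 logic relies on b2_list_file_versions returning versions grouped by file name with the newest first: it skips anything uploaded after the cutoff, then keeps only the first surviving version of each name. A distilled, self-contained sketch of that selection, with illustrative types standing in for rclone's:

    package main

    import (
        "fmt"
        "time"
    )

    type fileVersion struct {
        Name            string
        UploadTimestamp time.Time
    }

    // versionsAt picks, for each name, the newest version at or before the
    // cutoff. It assumes the input is grouped by name with timestamps
    // descending, the same shape the loop above depends on.
    func versionsAt(versions []fileVersion, cutoff time.Time) []fileVersion {
        var out []fileVersion
        lastName := ""
        for _, v := range versions {
            if v.UploadTimestamp.After(cutoff) {
                continue // created after the requested time
            }
            if v.Name == lastName {
                continue // an older version of a name already emitted
            }
            lastName = v.Name
            out = append(out, v)
        }
        return out
    }

    func main() {
        t := func(s string) time.Time { ts, _ := time.Parse(time.RFC3339, s); return ts }
        vs := []fileVersion{
            {"a.txt", t("2022-06-01T00:00:00Z")},
            {"a.txt", t("2022-01-01T00:00:00Z")},
        }
        fmt.Println(versionsAt(vs, t("2022-03-01T00:00:00Z"))) // only the January a.txt
    }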
@@ -1025,7 +1001,7 @@ func (f *Fs) clearBucketID(bucket string) {

 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1205,7 +1181,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
         }
     }
     var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
-        return time.Since(time.Time(timestamp)).Hours() > 24
+        if time.Since(time.Time(timestamp)).Hours() > 24 {
+            return true
+        }
+        return false
     }

     // Delete Config.Transfers in parallel
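The isUnfinishedUploadStale hunk is a pure style difference: the function returns the result of a comparison, so v1.60.1 returns the boolean expression directly, which is the idiomatic Go form; the if/return true/return false spelling behaves identically. Side by side, under an assumed 24-hour staleness rule:

    package main

    import (
        "fmt"
        "time"
    )

    // staleVerbose branches just to return a boolean.
    func staleVerbose(t time.Time) bool {
        if time.Since(t).Hours() > 24 {
            return true
        }
        return false
    }

    // stale returns the comparison itself - same behavior, less code.
    func stale(t time.Time) bool {
        return time.Since(t).Hours() > 24
    }

    func main() {
        old := time.Now().Add(-48 * time.Hour)
        fmt.Println(staleVerbose(old), stale(old)) // true true
    }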
@@ -1334,9 +1313,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1478,19 +1457,22 @@ func (o *Object) Size() int64 {

 // Clean the SHA1
 //
-// Make sure it is lower case.
+// Make sure it is lower case
 //
 // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this
-func cleanSHA1(sha1 string) string {
+func cleanSHA1(sha1 string) (out string) {
+    out = strings.ToLower(sha1)
     const unverified = "unverified:"
-    return strings.TrimPrefix(strings.ToLower(sha1), unverified)
+    if strings.HasPrefix(out, unverified) {
+        out = out[len(unverified):]
+    }
+    return out
 }

 // decodeMetaDataRaw sets the metadata from the data passed in
 //
 // Sets
-//
 // o.id
 // o.modTime
 // o.size
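The cleanSHA1 hunk is another behavior-preserving refactor: strings.TrimPrefix returns its input unchanged when the prefix is absent, so it replaces the HasPrefix-check-then-slice dance in one call. Both spellings, shown equivalent:

    package main

    import (
        "fmt"
        "strings"
    )

    const unverified = "unverified:"

    // cleanVerbose is the HasPrefix-and-slice spelling.
    func cleanVerbose(sha1 string) (out string) {
        out = strings.ToLower(sha1)
        if strings.HasPrefix(out, unverified) {
            out = out[len(unverified):]
        }
        return out
    }

    // clean does the same with strings.TrimPrefix, which is a no-op
    // when the prefix is absent.
    func clean(sha1 string) string {
        return strings.TrimPrefix(strings.ToLower(sha1), unverified)
    }

    func main() {
        for _, s := range []string{"UNVERIFIED:ABC123", "abc123"} {
            fmt.Println(cleanVerbose(s) == clean(s)) // true true
        }
    }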
@@ -1513,7 +1495,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 // decodeMetaData sets the metadata in the object from an api.File
 //
 // Sets
-//
 // o.id
 // o.modTime
 // o.size
@@ -1525,7 +1506,6 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
 // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
 //
 // Sets
-//
 // o.id
 // o.modTime
 // o.size
@@ -1587,7 +1567,6 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-//
 // o.id
 // o.modTime
 // o.size
@@ -1849,9 +1828,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     if o.fs.opt.Versions {
         return errNotWithVersions
     }
-    if o.fs.opt.VersionAt.IsSet() {
-        return errNotWithVersionAt
-    }
     size := src.Size()

     bucket, bucketPath := o.split()
@@ -2007,9 +1983,6 @@ func (o *Object) Remove(ctx context.Context) error {
     if o.fs.opt.Versions {
         return errNotWithVersions
     }
-    if o.fs.opt.VersionAt.IsSet() {
-        return errNotWithVersionAt
-    }
     if o.fs.opt.HardDelete {
         return o.fs.deleteByID(ctx, o.id, bucketPath)
     }
@@ -18,7 +18,6 @@ import (
     "github.com/rclone/rclone/backend/b2/api"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
-    "github.com/rclone/rclone/fs/chunksize"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/lib/atexit"
     "github.com/rclone/rclone/lib/rest"
@@ -89,19 +88,21 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+    remote := o.remote
     size := src.Size()
     parts := int64(0)
     sha1SliceSize := int64(maxParts)
-    chunkSize := defaultChunkSize
     if size == -1 {
         fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
     } else {
-        chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
         parts = size / int64(chunkSize)
         if size%int64(chunkSize) != 0 {
            parts++
         }
+        if parts > maxParts {
+            return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+        }
         sha1SliceSize = parts
     }

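In the newLargeUpload hunk, v1.60.1 computes the chunk size with chunksize.Calculator (so a very large file gets a bigger chunk size instead of an error), while the older code keeps the caller's chunk size and fails once the part count exceeds maxParts. The part count itself is ceiling division; a sketch of the older guard's arithmetic (the 10,000-part cap matches B2's documented large-file limit, but treat the constant as illustrative):

    package main

    import "fmt"

    const maxParts = 10000 // illustrative: B2 caps large files at 10,000 parts

    // countParts mirrors the arithmetic above: ceiling division of size by
    // chunkSize, with a guard when the file would need too many parts.
    func countParts(size, chunkSize int64) (int64, error) {
        parts := size / chunkSize
        if size%chunkSize != 0 {
            parts++
        }
        if parts > maxParts {
            return 0, fmt.Errorf("%d bytes makes too many parts %d > %d - increase chunk size", size, parts, maxParts)
        }
        return parts, nil
    }

    func main() {
        fmt.Println(countParts(250*1024*1024, 96*1024*1024)) // 3 <nil>
    }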
@@ -14,7 +14,7 @@ const (
     timeFormat = `"` + time.RFC3339 + `"`
 )

-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time

@@ -266,7 +266,7 @@ type Fs struct {
     root         string             // the path we are working on
     opt          Options            // parsed options
     features     *fs.Features       // optional features
-    srv          *rest.Client       // the connection to the server
+    srv          *rest.Client       // the connection to the one drive server
     dirCache     *dircache.DirCache // Map of directory path to directory id
     pacer        *fs.Pacer          // pacer for API calls
     tokenRenewer *oauthutil.Renew   // renew the token on expiry
@@ -692,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
-// Returns the object, leaf, directoryID and error.
+// Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -752,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size

 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -792,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // PutUnchecked the object into the container
 //
-// This will produce an error if the object already exists.
+// This will produce an error if the object already exists
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -877,9 +877,9 @@ func (f *Fs) Precision() time.Duration {

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

     srcPath := srcObj.fs.rootSlash() + srcObj.remote
     dstPath := f.rootSlash() + remote
-    if strings.EqualFold(srcPath, dstPath) {
+    if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
         return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
     }

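In the Copy hunk, v1.60.1 replaces strings.ToLower(a) == strings.ToLower(b) with strings.EqualFold, which folds case rune by rune without allocating two lowered copies. For ASCII paths like these the two always agree; exotic Unicode case pairs can differ because EqualFold uses simple case folding. For example:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        srcPath := "/Backup/File.TXT"
        dstPath := "/backup/file.txt"

        // Allocates two fully lowered copies, then compares them.
        slow := strings.ToLower(srcPath) == strings.ToLower(dstPath)

        // Compares rune by rune under Unicode simple case folding, no allocation.
        fast := strings.EqualFold(srcPath, dstPath)

        fmt.Println(slow, fast) // true true
    }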
@@ -995,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1235,6 +1235,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1345,9 +1346,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str

 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
-// The new object may have been created if an error is returned.
+// The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
     if o.fs.tokenRenewer != nil {
         o.fs.tokenRenewer.Start()
backend/cache/cache.go: 5 changes (vendored)
@@ -1,7 +1,6 @@
 //go:build !plan9 && !js
 // +build !plan9,!js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache

 import (
@@ -1129,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         case fs.Directory:
             _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
         default:
-            return fmt.Errorf("unknown object type %T", entry)
+            return fmt.Errorf("Unknown object type %T", entry)
         }
     }

@@ -1748,7 +1747,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     do := f.Fs.Features().About
     if do == nil {
-        return nil, errors.New("not supported by underlying remote")
+        return nil, errors.New("About not supported")
     }
     return do(ctx)
 }
|||||||
2
backend/cache/cache_test.go
vendored
2
backend/cache/cache_test.go
vendored
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
RemoteName: "TestCache:",
|
RemoteName: "TestCache:",
|
||||||
NilObject: (*cache.Object)(nil),
|
NilObject: (*cache.Object)(nil),
|
||||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
|
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
|
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
|
||||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
backend/cache/plex.go: 2 changes (vendored)
@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
     var data map[string]interface{}
     err = json.NewDecoder(resp.Body).Decode(&data)
     if err != nil {
-        return fmt.Errorf("failed to obtain token: %w", err)
+        return fmt.Errorf("failed to obtain token: %v", err)
     }
     tokenGen, ok := get(data, "user", "authToken")
     if !ok {
backend/cache/storage_memory.go: 5 changes (vendored)
@@ -76,7 +76,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {

 // CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
 func (m *Memory) CleanChunksByNeed(offset int64) {
-    for key := range m.db.Items() {
+    var items map[string]cache.Item
+
+    items = m.db.Items()
+    for key := range items {
         sepIdx := strings.LastIndex(key, "-")
         keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
         if err != nil {
backend/cache/storage_persistent.go: 19 changes (vendored)
@@ -250,7 +250,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
     if val != nil {
         err := json.Unmarshal(val, cachedDir)
         if err != nil {
-            return fmt.Errorf("error during unmarshalling obj: %w", err)
+            return fmt.Errorf("error during unmarshalling obj: %v", err)
         }
     } else {
         return fmt.Errorf("missing cached dir: %v", cachedDir)
@@ -456,7 +456,10 @@ func (b *Persistent) HasEntry(remote string) bool {

         return fmt.Errorf("couldn't find object (%v)", remote)
     })
-    return err == nil
+    if err == nil {
+        return true
+    }
+    return false
 }

 // HasChunk confirms the existence of a single chunk of an object
@@ -551,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
     err := b.db.Update(func(tx *bolt.Tx) error {
         dataTsBucket := tx.Bucket([]byte(DataTsBucket))
         if dataTsBucket == nil {
-            return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
+            return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
         }
         // iterate through ts
         c := dataTsBucket.Cursor()
@@ -901,16 +904,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
         v := bucket.Get([]byte(remote))
         err = json.Unmarshal(v, tempObj)
         if err != nil {
-            return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
+            return fmt.Errorf("pending upload (%v) not found %v", remote, err)
         }
         tempObj.Started = false
         v2, err := json.Marshal(tempObj)
         if err != nil {
-            return fmt.Errorf("pending upload not updated: %w", err)
+            return fmt.Errorf("pending upload not updated %v", err)
         }
         err = bucket.Put([]byte(tempObj.DestPath), v2)
         if err != nil {
-            return fmt.Errorf("pending upload not updated: %w", err)
+            return fmt.Errorf("pending upload not updated %v", err)
         }
         return nil
     })
@@ -966,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
         }
         v2, err := json.Marshal(tempObj)
         if err != nil {
-            return fmt.Errorf("pending upload not updated: %w", err)
+            return fmt.Errorf("pending upload not updated %v", err)
         }
         err = bucket.Put([]byte(tempObj.DestPath), v2)
         if err != nil {
-            return fmt.Errorf("pending upload not updated: %w", err)
+            return fmt.Errorf("pending upload not updated %v", err)
         }

         return nil
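A theme in the cache hunks above: v1.60.1 wraps errors with %w where the older code used %v. Both render the same message text, but only %w (Go 1.13+) preserves the error chain for errors.Is and errors.As. A small demonstration with an illustrative sentinel error standing in for a real low-level failure:

    package main

    import (
        "errors"
        "fmt"
    )

    // errBoltMissing is an illustrative sentinel, standing in for an error
    // a lower layer such as the bolt database might return.
    var errBoltMissing = errors.New("bucket missing")

    func main() {
        wrapped := fmt.Errorf("pending upload not updated: %w", errBoltMissing)
        flat := fmt.Errorf("pending upload not updated: %v", errBoltMissing)

        // Identical text either way...
        fmt.Println(wrapped.Error() == flat.Error()) // true

        // ...but only %w keeps the chain intact for callers.
        fmt.Println(errors.Is(wrapped, errBoltMissing)) // true
        fmt.Println(errors.Is(flat, errBoltMissing))    // false
    }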
@@ -32,6 +32,7 @@ import (
     "github.com/rclone/rclone/fs/operations"
 )

+//
 // Chunker's composite files have one or more chunks
 // and optional metadata object. If it's present,
 // meta object is named after the original file.
@@ -64,7 +65,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transaction identifier
+// keep their temporary chunk names (with the transacion identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.
@@ -78,6 +79,7 @@ import (
 // Metadata format v1 does not define any control chunk types,
 // they are currently ignored aka reserved.
 // In future they can be used to implement resumable uploads etc.
+//
 const (
     ctrlTypeRegStr   = `[a-z][a-z0-9]{2,6}`
     tempSuffixFormat = `_%04s`
@@ -513,7 +515,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {

     strRegex := regexp.QuoteMeta(pattern)
     strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
-    strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
+    strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
     strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
     f.nameRegexp = regexp.MustCompile(strRegex)

@@ -522,7 +524,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
     if numDigits > 1 {
         fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
     }
-    strFmt := strings.ReplaceAll(pattern, "%", "%%")
+    strFmt := strings.Replace(pattern, "%", "%%", -1)
     strFmt = strings.Replace(strFmt, "*", "%s", 1)
     f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
     f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
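The two setChunkNameFormat hunks swap strings.ReplaceAll for strings.Replace with n = -1. The two calls are specified to behave identically; ReplaceAll (added in Go 1.12) is just the clearer spelling of "replace every occurrence". With an illustrative chunk-name pattern:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        pattern := "*.rclone_chunk.###"

        // strings.Replace with n < 0 replaces every occurrence...
        a := strings.Replace(pattern, "#", "%d", -1)
        // ...which is exactly what strings.ReplaceAll does.
        b := strings.ReplaceAll(pattern, "#", "%d")

        fmt.Println(a)      // *.rclone_chunk.%d%d%d
        fmt.Println(a == b) // true
    }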
@@ -540,6 +542,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 //
 // xactID is a transaction identifier. Empty xactID denotes active chunk,
 // otherwise temporary chunk name is produced.
+//
 func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
     dir, parentName := path.Split(filePath)
     var name, tempSuffix string
@@ -705,6 +708,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 // directory together with dead chunks.
 // In future a flag named like `--chunker-list-hidden` may be added to
 // rclone that will tell List to reveal hidden chunks.
+//
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
     entries, err = f.base.List(ctx, dir)
     if err != nil {
@@ -864,6 +868,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 // Note that chunker prefers analyzing file names rather than reading
 // the content of meta object assuming that directory scans are fast
 // but opening even a small file can be slow on some backends.
+//
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     return f.scanObject(ctx, remote, false)
 }
@@ -1079,7 +1084,7 @@ func (o *Object) readMetadata(ctx context.Context) error {

 // readXactID returns the transaction ID stored in the passed metadata object
 func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
-    // if xactID has already been read and cached return it now
+    // if xactID has already been read and cahced return it now
     if o.xIDCached {
         return o.xactID, nil
     }
@@ -1581,6 +1586,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // This command will chain to `purge` from wrapped remote.
 // As a result it removes not only composite chunker files with their
 // active chunks but also all hidden temporary chunks in the directory.
+//
 func (f *Fs) Purge(ctx context.Context, dir string) error {
     do := f.base.Features().Purge
     if do == nil {
@@ -1622,6 +1628,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // Unsupported control chunks will get re-picked by a more recent
 // rclone version with unexpected results. This can be helped by
 // the `delete hidden` flag above or at least the user has been warned.
+//
 func (o *Object) Remove(ctx context.Context) (err error) {
     if err := o.f.forbidChunk(o, o.Remote()); err != nil {
         // operations.Move can still call Remove if chunker's Move refuses
@@ -1797,9 +1804,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1818,9 +1825,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1888,7 +1895,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
     do := f.base.Features().CleanUp
     if do == nil {
-        return errors.New("not supported by underlying remote")
+        return errors.New("can't CleanUp")
     }
     return do(ctx)
 }
@@ -1897,7 +1904,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     do := f.base.Features().About
     if do == nil {
-        return nil, errors.New("not supported by underlying remote")
+        return nil, errors.New("About not supported")
     }
     return do(ctx)
 }
@@ -2118,6 +2125,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 // file, then tries to read it from metadata. This in theory
 // handles the unusual case when a small file has been tampered
 // on the level of wrapped remote but chunker is unaware of that.
+//
 func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
     if err := o.readMetadata(ctx); err != nil {
         return "", err // valid metadata is required to get hash, abort
@@ -2406,6 +2414,7 @@ type metaSimpleJSON struct {
 // - for files larger than chunk size
 // - if file contents can be mistaken as meta object
 // - if consistent hashing is On but wrapped remote can't provide given hash
+//
 func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
     version := metadataVersion
     if xactID == "" && version == 2 {
@@ -2438,6 +2447,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
 // New format will have a higher version number and cannot be correctly
 // handled by current implementation.
 // The version check below will then explicitly ask user to upgrade rclone.
+//
 func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
     // Be strict about JSON format
     // to reduce possibility that a random small file resembles metadata.
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")

 func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
     item := fstest.Item{Path: name, ModTime: mtime1}
-    obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
+    _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
     assert.NotNil(t, obj, message)
     return obj
 }
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
     checkSmallFile := func(name, contents string) {
         filename := path.Join(dir, name)
         item := fstest.Item{Path: filename, ModTime: modTime}
-        put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
+        _, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
         assert.NotNil(t, put)
         checkSmallFileInternals(put)
         checkContents(put, contents)
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {

     newFile := func(name string) fs.Object {
         item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
-        obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+        _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
         require.NotNil(t, obj)
         return obj
     }
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
     newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
         filename = path.Join(dir, name)
         item := fstest.Item{Path: filename, ModTime: modTime}
-        obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
+        _, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
         require.NotNil(t, obj)
         if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
             txnID = chunkObj.xactID
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
             name = f.makeChunkName(name, part-1, "", "")
         }
         item := fstest.Item{Path: name, ModTime: modTime}
-        obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
+        _, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
         assert.NotNil(t, obj, msg)
     }

@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
     newFile := func(f fs.Fs, name string) (fs.Object, string) {
         filename := path.Join(dir, name)
         item := fstest.Item{Path: filename, ModTime: modTime}
-        obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+        _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
         require.NotNil(t, obj)
         return obj, filename
     }
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
     modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
     item := fstest.Item{Path: "movefile", ModTime: modTime}
     contents := "abcdef"
-    file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
+    _, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)

     dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
     dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -35,7 +35,6 @@ func TestIntegration(t *testing.T) {
             "MimeType",
             "GetTier",
             "SetTier",
-            "Metadata",
         },
         UnimplementableFsMethods: []string{
             "PublicLink",
@@ -54,7 +53,6 @@ func TestIntegration(t *testing.T) {
             {Name: name, Key: "type", Value: "chunker"},
             {Name: name, Key: "remote", Value: tempDir},
         }
-        opt.QuickTestOK = true
     }
     fstests.Run(t, &opt)
 }
@@ -1,992 +0,0 @@
|
|||||||
// Package combine implents a backend to combine multiple remotes in a directory tree
|
|
||||||
package combine
|
|
||||||
|
|
||||||
/*
|
|
||||||
Have API to add/remove branches in the combine
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/fs/walk"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Register with Fs
|
|
||||||
func init() {
|
|
||||||
fsi := &fs.RegInfo{
|
|
||||||
Name: "combine",
|
|
||||||
Description: "Combine several remotes into one",
|
|
||||||
NewFs: NewFs,
|
|
||||||
MetadataInfo: &fs.MetadataInfo{
|
|
||||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
|
||||||
},
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "upstreams",
|
|
||||||
Help: `Upstreams for combining
|
|
||||||
|
|
||||||
These should be in the form
|
|
||||||
|
|
||||||
dir=remote:path dir2=remote2:path
|
|
||||||
|
|
||||||
Where before the = is specified the root directory and after is the remote to
|
|
||||||
put there.
|
|
||||||
|
|
||||||
Embedded spaces can be added using quotes
|
|
||||||
|
|
||||||
"dir=remote:path with space" "dir2=remote2:path with space"
|
|
||||||
|
|
||||||
`,
|
|
||||||
Required: true,
|
|
||||||
Default: fs.SpaceSepList(nil),
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
fs.Register(fsi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Upstreams fs.SpaceSepList `config:"upstreams"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a combine of upstreams
|
|
||||||
type Fs struct {
|
|
||||||
name string // name of this remote
|
|
||||||
features *fs.Features // optional features
|
|
||||||
opt Options // options for this Fs
|
|
||||||
root string // the path we are working on
|
|
||||||
hashSet hash.Set // common hashes
|
|
||||||
when time.Time // directory times
|
|
||||||
upstreams map[string]*upstream // map of upstreams
|
|
||||||
}
|
|
||||||
|
|
||||||
// adjustment stores the info to add a prefix to a path or chop characters off
|
|
||||||
type adjustment struct {
|
|
||||||
root string
|
|
||||||
rootSlash string
|
|
||||||
mountpoint string
|
|
||||||
mountpointSlash string
|
|
||||||
}
|
|
||||||
|
|
||||||
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
|
|
||||||
//
|
|
||||||
// mountpoint is the point the upstream is mounted and root is the combine root
|
|
||||||
func newAdjustment(root, mountpoint string) (a adjustment) {
|
|
||||||
return adjustment{
|
|
||||||
root: root,
|
|
||||||
rootSlash: root + "/",
|
|
||||||
mountpoint: mountpoint,
|
|
||||||
mountpointSlash: mountpoint + "/",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var errNotUnderRoot = errors.New("file not under root")
|
|
||||||
|
|
||||||
// do makes the adjustment on s, mapping an upstream path into a combine path
|
|
||||||
func (a *adjustment) do(s string) (string, error) {
|
|
||||||
absPath := join(a.mountpoint, s)
|
|
||||||
if a.root == "" {
|
|
||||||
return absPath, nil
|
|
||||||
}
|
|
||||||
if absPath == a.root {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(absPath, a.rootSlash) {
|
|
||||||
return "", errNotUnderRoot
|
|
||||||
}
|
|
||||||
return absPath[len(a.rootSlash):], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// undo makes the adjustment on s, mapping a combine path into an upstream path
|
|
||||||
func (a *adjustment) undo(s string) (string, error) {
|
|
||||||
absPath := join(a.root, s)
|
|
||||||
if absPath == a.mountpoint {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(absPath, a.mountpointSlash) {
|
|
||||||
return "", errNotUnderRoot
|
|
||||||
}
|
|
||||||
return absPath[len(a.mountpointSlash):], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// upstream represents an upstream Fs
|
|
||||||
type upstream struct {
|
|
||||||
f fs.Fs
|
|
||||||
parent *Fs
|
|
||||||
dir string // directory the upstream is mounted
|
|
||||||
pathAdjustment adjustment // how to fiddle with the path
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create an upstream from the directory it is mounted on and the remote
|
|
||||||
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
|
|
||||||
uFs, err := cache.Get(ctx, remote)
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
|
|
||||||
}
|
|
||||||
u := &upstream{
|
|
||||||
f: uFs,
|
|
||||||
parent: f,
|
|
||||||
dir: dir,
|
|
||||||
pathAdjustment: newAdjustment(f.root, dir),
|
|
||||||
}
|
|
||||||
cache.PinUntilFinalized(u.f, u)
|
|
||||||
return u, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path.
|
|
||||||
//
|
|
||||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
|
||||||
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err = configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Backward compatible to old config
|
|
||||||
if len(opt.Upstreams) == 0 {
|
|
||||||
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
|
|
||||||
}
|
|
||||||
for _, u := range opt.Upstreams {
|
|
||||||
if strings.HasPrefix(u, name+":") {
|
|
||||||
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isDir := false
|
|
||||||
for strings.HasSuffix(root, "/") {
|
|
||||||
root = root[:len(root)-1]
|
|
||||||
isDir = true
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
|
|
||||||
when: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
g, gCtx := errgroup.WithContext(ctx)
|
|
||||||
var mu sync.Mutex
|
|
||||||
for _, upstream := range opt.Upstreams {
|
|
||||||
upstream := upstream
|
|
||||||
g.Go(func() (err error) {
|
|
||||||
equal := strings.IndexRune(upstream, '=')
|
|
||||||
if equal < 0 {
|
|
||||||
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
|
|
||||||
}
|
|
||||||
dir, remote := upstream[:equal], upstream[equal+1:]
|
|
||||||
if dir == "" {
|
|
||||||
return fmt.Errorf("empty dir in upstream definition %q", upstream)
|
|
||||||
}
|
|
||||||
if remote == "" {
|
|
||||||
return fmt.Errorf("empty remote in upstream definition %q", upstream)
|
|
||||||
}
|
|
||||||
if strings.ContainsRune(dir, '/') {
|
|
||||||
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
|
|
||||||
}
|
|
||||||
u, err := f.newUpstream(gCtx, dir, remote)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
if _, found := f.upstreams[dir]; found {
|
|
||||||
err = fmt.Errorf("duplicate directory name %q", dir)
|
|
||||||
} else {
|
|
||||||
f.upstreams[dir] = u
|
|
||||||
}
|
|
||||||
mu.Unlock()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = g.Wait()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// check features
|
|
||||||
var features = (&fs.Features{
|
|
||||||
CaseInsensitive: true,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: true,
|
|
||||||
WriteMimeType: true,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
BucketBased: true,
|
|
||||||
SetTier: true,
|
|
||||||
GetTier: true,
|
|
||||||
ReadMetadata: true,
|
|
||||||
WriteMetadata: true,
|
|
||||||
UserMetadata: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
canMove := true
|
|
||||||
for _, u := range f.upstreams {
|
|
||||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
|
||||||
if !operations.CanServerSideMove(u.f) {
|
|
||||||
canMove = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We can move if all remotes support Move or Copy
|
|
||||||
if canMove {
|
|
||||||
features.Move = f.Move
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enable ListR when upstreams either support ListR or is local
|
|
||||||
// But not when all upstreams are local
|
|
||||||
	if features.ListR == nil {
		for _, u := range f.upstreams {
			if u.f.Features().ListR != nil {
				features.ListR = f.ListR
			} else if !u.f.Features().IsLocal {
				features.ListR = nil
				break
			}
		}
	}

	// Enable Purge when any upstreams support it
	if features.Purge == nil {
		for _, u := range f.upstreams {
			if u.f.Features().Purge != nil {
				features.Purge = f.Purge
				break
			}
		}
	}

	// Enable Shutdown when any upstreams support it
	if features.Shutdown == nil {
		for _, u := range f.upstreams {
			if u.f.Features().Shutdown != nil {
				features.Shutdown = f.Shutdown
				break
			}
		}
	}

	// Enable DirCacheFlush when any upstreams support it
	if features.DirCacheFlush == nil {
		for _, u := range f.upstreams {
			if u.f.Features().DirCacheFlush != nil {
				features.DirCacheFlush = f.DirCacheFlush
				break
			}
		}
	}

	// Enable ChangeNotify when any upstreams support it
	if features.ChangeNotify == nil {
		for _, u := range f.upstreams {
			if u.f.Features().ChangeNotify != nil {
				features.ChangeNotify = f.ChangeNotify
				break
			}
		}
	}

	f.features = features

	// Get common intersection of hashes
	var hashSet hash.Set
	var first = true
	for _, u := range f.upstreams {
		if first {
			hashSet = u.f.Hashes()
			first = false
		} else {
			hashSet = hashSet.Overlap(u.f.Hashes())
		}
	}
	f.hashSet = hashSet

	// Check to see if the root is actually a file
	if f.root != "" && !isDir {
		_, err := f.NewObject(ctx, "")
		if err != nil {
			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
				// File doesn't exist or is a directory so return old f
				return f, nil
			}
			return nil, err
		}

		// The root points to a file so adjust f to point at its parent directory
		f.root = path.Dir(f.root)
		if f.root == "." {
			f.root = ""
		}
		// Adjust path adjustment to remove leaf
		for _, u := range f.upstreams {
			u.pathAdjustment = newAdjustment(f.root, u.dir)
		}
		return f, fs.ErrorIsFile
	}
	return f, nil
}

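// Note added for clarity (not in the original source): given a config such
// as `upstreams = "documents=drive:docs photos=s3:photos"`, the parsing
// loop above splits each space-separated definition at the first '=', so
// "documents" becomes a directory backed by the remote "drive:docs".
// Definitions with no '=', an empty dir, an empty remote, or a '/' in the
// dir are rejected with the errors shown above.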
// Run a function over all the upstreams in parallel
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	for _, u := range f.upstreams {
		u := u
		g.Go(func() (err error) {
			return fn(gCtx, u)
		})
	}
	return g.Wait()
}
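// Illustrative usage (a sketch, not part of the original file): callers
// such as Purge and Shutdown below invoke this helper as
//
//	err := f.multithread(ctx, func(ctx context.Context, u *upstream) error {
//		return u.purge(ctx, "")
//	})
//
// Each upstream runs in its own goroutine and, via errgroup, the first
// non-nil error cancels gCtx for the remaining upstreams.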

// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
	result := path.Join(elem...)
	if result == "." {
		return ""
	}
	if len(result) > 0 && result[0] == '/' {
		result = result[1:]
	}
	return result
}
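// Expected behaviour, shown here as a hedged sketch rather than the
// author's own test cases:
//
//	join("a", "b") // "a/b" - same as path.Join
//	join(".")      // ""    - path.Join would return "."
//	join("/", "a") // "a"   - the leading slash is stripped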

// find the upstream for the remote passed in, returning the upstream and the adjusted path
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
	// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
	for _, u := range f.upstreams {
		uRemote, err = u.pathAdjustment.undo(remote)
		if err == nil {
			return u, uRemote, nil
		}
	}
	return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
}
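// Illustrative example (assumes an upstream configured as dir "documents"
// and f.root == ""): findUpstream("documents/file.txt") succeeds for that
// upstream with uRemote == "file.txt", because pathAdjustment.undo strips
// the mount point; a remote under no mount point falls through the loop
// and returns fs.ErrorDirNotFound wrapped as above.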

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("combine root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// The root always exists
	if f.root == "" && dir == "" {
		return nil
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
		return err
	}
	return u.f.Rmdir(ctx, uRemote)
}

// Hashes returns the hash types supported by every upstream
func (f *Fs) Hashes() hash.Set {
	return f.hashSet
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// The root always exists
	if f.root == "" && dir == "" {
		return nil
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
		return err
	}
	return u.f.Mkdir(ctx, uRemote)
}

// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
	if do := u.f.Features().Purge; do != nil {
		err = do(ctx, dir)
	} else {
		err = operations.Purge(ctx, u.f, dir)
	}
	return err
}

// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	if f.root == "" && dir == "" {
		return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
			return u.purge(ctx, "")
		})
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
		return err
	}
	return u.purge(ctx, uRemote)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	dstU, dstRemote, err := f.findUpstream(remote)
	if err != nil {
		return nil, err
	}

	do := dstU.f.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}

	o, err := do(ctx, srcObj.Object, dstRemote)
	if err != nil {
		return nil, err
	}

	return dstU.newObject(o), nil
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	dstU, dstRemote, err := f.findUpstream(remote)
	if err != nil {
		return nil, err
	}

	do := dstU.f.Features().Move
	useCopy := false
	if do == nil {
		do = dstU.f.Features().Copy
		if do == nil {
			return nil, fs.ErrorCantMove
		}
		useCopy = true
	}

	o, err := do(ctx, srcObj.Object, dstRemote)
	if err != nil {
		return nil, err
	}

	// If did Copy then remove the source object
	if useCopy {
		err = srcObj.Remove(ctx)
		if err != nil {
			return nil, err
		}
	}

	return dstU.newObject(o), nil
}
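// Clarifying note (added, not original text): when the destination
// upstream has no Move but does have Copy, the code above emulates a move
// as a server-side copy followed by Remove on the source - which is why
// NewFs only wires up f.Move when operations.CanServerSideMove reports
// that every upstream supports Move or Copy.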

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	dstU, dstURemote, err := f.findUpstream(dstRemote)
	if err != nil {
		return err
	}

	srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
	if err != nil {
		return err
	}

	do := dstU.f.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}

	fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
	return do(ctx, srcU.f, srcURemote, dstURemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
	var uChans []chan time.Duration

	for _, u := range f.upstreams {
		u := u
		if do := u.f.Features().ChangeNotify; do != nil {
			ch := make(chan time.Duration)
			uChans = append(uChans, ch)
			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
				newPath, err := u.pathAdjustment.do(path)
				if err != nil {
					fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
					return
				}
				fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
				notifyFunc(newPath, entryType)
			}
			do(ctx, wrappedNotifyFunc, ch)
		}
	}

	go func() {
		for i := range ch {
			for _, c := range uChans {
				c <- i
			}
		}
		for _, c := range uChans {
			close(c)
		}
	}()
}
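// Added explanatory sketch (a reading of the code above, not original
// text): the goroutine fans the caller's poll-interval channel out to one
// private channel per notifying upstream, so a single write such as
//
//	pollInterval <- 30 * time.Second
//
// reaches every upstream's ChangeNotify, and closing the caller's channel
// closes all of the per-upstream channels in turn.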

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	ctx := context.Background()
	_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
		if do := u.f.Features().DirCacheFlush; do != nil {
			do()
		}
		return nil
	})
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	srcPath := src.Remote()
	u, uRemote, err := f.findUpstream(srcPath)
	if err != nil {
		return nil, err
	}
	uSrc := operations.NewOverrideRemote(src, uRemote)
	var o fs.Object
	if stream {
		o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
	} else {
		o, err = u.f.Put(ctx, in, uSrc, options...)
	}
	if err != nil {
		return nil, err
	}
	return u.newObject(o), nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, false, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, true, options...)
	default:
		return nil, err
	}
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	usage := &fs.Usage{
		Total:   new(int64),
		Used:    new(int64),
		Trashed: new(int64),
		Other:   new(int64),
		Free:    new(int64),
		Objects: new(int64),
	}
	for _, u := range f.upstreams {
		doAbout := u.f.Features().About
		if doAbout == nil {
			continue
		}
		usg, err := doAbout(ctx)
		if errors.Is(err, fs.ErrorDirNotFound) {
			continue
		}
		if err != nil {
			return nil, err
		}
		if usg.Total != nil && usage.Total != nil {
			*usage.Total += *usg.Total
		} else {
			usage.Total = nil
		}
		if usg.Used != nil && usage.Used != nil {
			*usage.Used += *usg.Used
		} else {
			usage.Used = nil
		}
		if usg.Trashed != nil && usage.Trashed != nil {
			*usage.Trashed += *usg.Trashed
		} else {
			usage.Trashed = nil
		}
		if usg.Other != nil && usage.Other != nil {
			*usage.Other += *usg.Other
		} else {
			usage.Other = nil
		}
		if usg.Free != nil && usage.Free != nil {
			*usage.Free += *usg.Free
		} else {
			usage.Free = nil
		}
		if usg.Objects != nil && usage.Objects != nil {
			*usage.Objects += *usg.Objects
		} else {
			usage.Objects = nil
		}
	}
	return usage, nil
}
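// Added note on the aggregation above (a reading of the code, not the
// author's words): each fs.Usage field is a *int64 so that "not known"
// can be distinguished from zero. A field is summed across upstreams only
// while every upstream reports it; as soon as one upstream leaves, say,
// Trashed as nil, the combined Trashed is set to nil too rather than
// reporting a misleading partial total.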

// Wraps entries for this upstream
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
	for i, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			entries[i] = u.newObject(x)
		case fs.Directory:
			newDir := fs.NewDirCopy(ctx, x)
			newPath, err := u.pathAdjustment.do(newDir.Remote())
			if err != nil {
				return nil, err
			}
			newDir.SetRemote(newPath)
			entries[i] = newDir
		default:
			return nil, fmt.Errorf("unknown entry type %T", entry)
		}
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
	if f.root == "" && dir == "" {
		entries = make(fs.DirEntries, 0, len(f.upstreams))
		for combineDir := range f.upstreams {
			d := fs.NewDir(combineDir, f.when)
			entries = append(entries, d)
		}
		return entries, nil
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
		return nil, err
	}
	entries, err = u.f.List(ctx, uRemote)
	if err != nil {
		return nil, err
	}
	return u.wrapEntries(ctx, entries)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
	if f.root == "" && dir == "" {
		rootEntries, err := f.List(ctx, "")
		if err != nil {
			return err
		}
		err = callback(rootEntries)
		if err != nil {
			return err
		}
		var mu sync.Mutex
		syncCallback := func(entries fs.DirEntries) error {
			mu.Lock()
			defer mu.Unlock()
			return callback(entries)
		}
		err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
			return f.ListR(ctx, u.dir, syncCallback)
		})
		if err != nil {
			return err
		}
		return nil
	}
	u, uRemote, err := f.findUpstream(dir)
	if err != nil {
		return err
	}
	wrapCallback := func(entries fs.DirEntries) error {
		entries, err := u.wrapEntries(ctx, entries)
		if err != nil {
			return err
		}
		return callback(entries)
	}
	if do := u.f.Features().ListR; do != nil {
		err = do(ctx, uRemote, wrapCallback)
	} else {
		err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
	}
	if err == fs.ErrorDirNotFound {
		err = nil
	}
	return err
}

// NewObject creates a new remote combine file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	u, uRemote, err := f.findUpstream(remote)
	if err != nil {
		return nil, err
	}
	if uRemote == "" || strings.HasSuffix(uRemote, "/") {
		return nil, fs.ErrorIsDir
	}
	o, err := u.f.NewObject(ctx, uRemote)
	if err != nil {
		return nil, err
	}
	return u.newObject(o), nil
}

// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
	var greatestPrecision time.Duration
	for _, u := range f.upstreams {
		uPrecision := u.f.Precision()
		if uPrecision > greatestPrecision {
			greatestPrecision = uPrecision
		}
	}
	return greatestPrecision
}
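// Worked example (added for clarity; the numbers are hypothetical): if
// one upstream reports a modtime precision of 1*time.Nanosecond (local
// disk) and another reports 1*time.Second (an S3-like remote), the
// combined Fs advertises 1*time.Second - the coarsest precision is the
// only one every upstream can honour.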

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
		if do := u.f.Features().Shutdown; do != nil {
			return do(ctx)
		}
		return nil
	})
}

// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
	fs.Object
	u *upstream
}

func (u *upstream) newObject(o fs.Object) *Object {
	return &Object{
		Object: o,
		u:      u,
	}
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.u.parent
}

// String returns the remote path
func (o *Object) String() string {
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	newPath, err := o.u.pathAdjustment.do(o.Object.String())
	if err != nil {
		fs.Errorf(o, "Bad object: %v", err)
		return err.Error()
	}
	return newPath
}

// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
	if do, ok := o.Object.(fs.MimeTyper); ok {
		mimeType = do.MimeType(ctx)
	}
	return mimeType
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
	do, ok := o.Object.(fs.GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	do, ok := o.Object.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	do, ok := o.Object.(fs.Metadataer)
	if !ok {
		return nil, nil
	}
	return do.Metadata(ctx)
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
	do, ok := o.Object.(fs.SetTierer)
	if !ok {
		return errors.New("underlying remote does not support SetTier")
	}
	return do.SetTier(tier)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.FullObject      = (*Object)(nil)
)

@@ -1,94 +0,0 @@
package combine

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAdjustmentDo(t *testing.T) {
	for _, test := range []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{
			root:       "",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "mountpoint/path/to/file.txt",
		},
		{
			root:       "mountpoint",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "wrongpath/to/file.txt",
			want:       "",
			wantErr:    errNotUnderRoot,
		},
	} {
		what := fmt.Sprintf("%+v", test)
		a := newAdjustment(test.root, test.mountpoint)
		got, gotErr := a.do(test.in)
		assert.Equal(t, test.wantErr, gotErr)
		assert.Equal(t, test.want, got, what)
	}
}

func TestAdjustmentUndo(t *testing.T) {
	for _, test := range []struct {
		root       string
		mountpoint string
		in         string
		want       string
		wantErr    error
	}{
		{
			root:       "",
			mountpoint: "mountpoint",
			in:         "mountpoint/path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint",
			mountpoint: "mountpoint",
			in:         "path/to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "mountpoint/path",
			mountpoint: "mountpoint",
			in:         "to/file.txt",
			want:       "path/to/file.txt",
		},
		{
			root:       "wrongmountpoint/path",
			mountpoint: "mountpoint",
			in:         "to/file.txt",
			want:       "",
			wantErr:    errNotUnderRoot,
		},
	} {
		what := fmt.Sprintf("%+v", test)
		a := newAdjustment(test.root, test.mountpoint)
		got, gotErr := a.undo(test.in)
		assert.Equal(t, test.wantErr, gotErr)
		assert.Equal(t, test.want, got, what)
	}
}

@@ -1,81 +0,0 @@
// Test Combine filesystem interface
package combine_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 3)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
	name := "TestCombineLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK: true,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
	name := "TestCombineMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
		QuickTestOK: true,
	})
}

func TestMixed(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	dirs := MakeTestDirs(t, 2)
	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
	name := "TestCombineMixed"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":dir1",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "combine"},
			{Name: name, Key: "upstreams", Value: upstreams},
		},
	})
}

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
	for i := 1; i <= n; i++ {
		dir := t.TempDir()
		dirs = append(dirs, dir)
	}
	return dirs
}
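// Note added for readers (not part of the original tests): these tests
// build the same kind of space-separated `upstreams` value that NewFs
// parses, e.g. "dir1=/tmp/x dir2=:memory:dir2", then point fstests at the
// "dir1" mount of the combine remote they configure on the fly.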
@@ -29,7 +29,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -54,7 +53,7 @@ const (
 	Gzip = 2
 )
 
-var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
+var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
 
 // Register with Fs
 func init() {
@@ -71,9 +70,6 @@ func init() {
 		Name:        "compress",
 		Description: "Compress a remote",
 		NewFs:       NewFs,
-		MetadataInfo: &fs.MetadataInfo{
-			Help: `Any metadata supported by the underlying remote is read and written.`,
-		},
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to compress.",
@@ -91,7 +87,7 @@ Generally -1 (default, equivalent to 5) is recommended.
 Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffman encoding only. Only use if you know what you
+Level -2 uses Huffmann encoding only. Only use if you know what you
 are doing.
 Level 0 turns off compression.`,
 			Default: sgzip.DefaultCompression,
@@ -131,7 +127,7 @@ type Fs struct {
 	features *fs.Features // optional features
 }
 
-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -184,9 +180,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		SetTier:                 true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
-		ReadMetadata:            true,
-		WriteMetadata:           true,
-		UserMetadata:            true,
 	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 	// We support reading MIME types no matter the wrapped fs
 	f.features.ReadMimeType = true
@@ -229,7 +222,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	// Separate the filename and size from the extension
 	extensionPos := strings.LastIndex(compressedFileName, ".")
 	if extensionPos == -1 {
-		return "", "", 0, errors.New("file name has no extension")
+		return "", "", 0, errors.New("File name has no extension")
 	}
 	extension = compressedFileName[extensionPos:]
 	nameWithSize := compressedFileName[:extensionPos]
@@ -238,11 +231,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	}
 	match := nameRegexp.FindStringSubmatch(nameWithSize)
 	if match == nil || len(match) != 3 {
-		return "", "", 0, errors.New("invalid filename")
+		return "", "", 0, errors.New("Invalid filename")
 	}
 	size, err := base64ToInt64(match[2])
 	if err != nil {
-		return "", "", 0, errors.New("could not decode size")
+		return "", "", 0, errors.New("Could not decode size")
 	}
 	return match[1], gzFileExt, size, nil
 }
@@ -311,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 		case fs.Directory:
 			f.addDir(&newEntries, x)
 		default:
-			return nil, fmt.Errorf("unknown object type %T", entry)
+			return nil, fmt.Errorf("Unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -368,16 +361,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	meta, err := readMetadata(ctx, mo)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding metadata: %w", err)
+	meta := readMetadata(ctx, mo)
+	if meta == nil {
+		return nil, errors.New("error decoding metadata")
 	}
 	// Create our Object
 	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
-	if err != nil {
-		return nil, err
-	}
-	return f.newObject(o, mo, meta), nil
+	return f.newObject(o, mo, meta), err
 }
 
 // checkCompressAndType checks if an object is compressible and determines its mime type
@@ -455,7 +445,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
 	}
 
-	// Need to include what we already read
+	// Need to include what we allready read
 	in = &ReadCloserWrapper{
 		Reader: io.MultiReader(bytes.NewReader(buf), in),
 		Closer: in,
@@ -476,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		_ = os.Remove(tempFile.Name())
 	}()
 	if err != nil {
-		return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
+		return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
 	}
 	if _, err = io.Copy(tempFile, in); err != nil {
-		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
+		return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
 		return nil, err
@@ -681,7 +671,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
 		}
 		return nil, err
 	}
-	return f.newObject(dataObject, mo, meta), nil
+	return f.newObject(dataObject, mo, meta), err
 }
 
 // Put in to the remote path with the modTime given of the given size
@@ -730,23 +720,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
 		err = oldObj.(*Object).Object.Remove(ctx)
 		if err != nil {
-			return nil, fmt.Errorf("couldn't remove original object: %w", err)
+			return nil, fmt.Errorf("Could remove original object: %w", err)
 		}
 	}
 
 	// If our new object is compressed we have to rename it with the correct size.
-	// Uncompressed objects don't store the size in the name so they'll already have the correct name.
+	// Uncompressed objects don't store the size in the name so they'll allready have the correct name.
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
-			return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
+			return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
 		}
 		newObj.Object = wrapObj
 	}
 	return newObj, nil
 }
 
-// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right now I can't think of a way to make this work.
 
 // PutUnchecked uploads the object
@@ -789,9 +779,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -839,9 +829,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -916,7 +906,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("not supported by underlying remote")
+		return errors.New("can't CleanUp: not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -925,7 +915,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("not supported by underlying remote")
+		return nil, errors.New("can't About: not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -1044,19 +1034,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
 }
 
 // This function will read the metadata from a metadata object.
-func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
 	// Open our metadata object
 	rc, err := mo.Open(ctx)
 	if err != nil {
-		return nil, err
+		return nil
 	}
-	defer fs.CheckClose(rc, &err)
+	defer func() {
+		err := rc.Close()
+		if err != nil {
+			fs.Errorf(mo, "Error closing object: %v", err)
+		}
+	}()
 	jr := json.NewDecoder(rc)
 	meta = new(ObjectMetadata)
 	if err = jr.Decode(meta); err != nil {
-		return nil, err
+		return nil
 	}
-	return meta, nil
+	return meta
 }
 
 // Remove removes this object
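A note on the readMetadata hunk above (added for clarity, not wording from
either branch): the v1.60.1 side returns (meta, err) and defers
fs.CheckClose(rc, &err) so a failed Close surfaces to the caller, while
the older branch returns only meta (nil on any failure) and logs Close
errors instead. Callers such as NewObject and loadMetadataIfNotLoaded
change accordingly on each side of the diff.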
@@ -1101,9 +1096,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	origName := o.Remote()
 	if o.meta.Mode != Uncompressed || compressible {
 		newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
-		if err != nil {
-			return err
-		}
 		if newObject.Object.Remote() != o.Object.Remote() {
 			if removeErr := o.Object.Remove(ctx); removeErr != nil {
 				return removeErr
@@ -1117,10 +1109,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 		// If we are, just update the object and metadata
 		newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
+	}
 	if err != nil {
 		return err
 	}
-	}
 	// Update object metadata and return
 	o.Object = newObject.Object
 	o.meta = newObject.meta
@@ -1130,9 +1122,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
-	if o == nil {
-		log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
-	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1145,9 +1134,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
 
 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
-	if o == nil {
-		log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
-	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1175,7 +1161,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
 		return err
 	}
 	if o.meta == nil {
-		o.meta, err = readMetadata(ctx, o.mo)
+		o.meta = readMetadata(ctx, o.mo)
 	}
 	return err
 }
@@ -1228,21 +1214,6 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return o.meta.MimeType
 }
 
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
-	err := o.loadMetadataIfNotLoaded(ctx)
-	if err != nil {
-		return nil, err
-	}
-	do, ok := o.mo.(fs.Metadataer)
-	if !ok {
-		return nil, nil
-	}
-	return do.Metadata(ctx)
-}
-
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1389,51 +1360,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	return "", nil // cannot know the checksum
 }
 
-// ID returns the ID of the Object if known, or "" if not
-func (o *ObjectInfo) ID() string {
-	do, ok := o.src.(fs.IDer)
-	if !ok {
-		return ""
-	}
-	return do.ID()
-}
-
-// MimeType returns the content type of the Object if
-// known, or "" if not
-func (o *ObjectInfo) MimeType(ctx context.Context) string {
-	do, ok := o.src.(fs.MimeTyper)
-	if !ok {
-		return ""
-	}
-	return do.MimeType(ctx)
-}
-
-// UnWrap returns the Object that this Object is wrapping or
-// nil if it isn't wrapping anything
-func (o *ObjectInfo) UnWrap() fs.Object {
-	return fs.UnWrapObjectInfo(o.src)
-}
-
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
-	do, ok := o.src.(fs.Metadataer)
-	if !ok {
-		return nil, nil
-	}
-	return do.Metadata(ctx)
-}
-
-// GetTier returns storage tier or class of the Object
-func (o *ObjectInfo) GetTier() string {
-	do, ok := o.src.(fs.GetTierer)
-	if !ok {
-		return ""
-	}
-	return do.GetTier()
-}
-
 // ID returns the ID of the Object if known, or "" if not
 func (o *Object) ID() string {
 	do, ok := o.Object.(fs.IDer)
@@ -1486,6 +1412,11 @@ var (
 	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.PublicLinker   = (*Fs)(nil)
 	_ fs.Shutdowner     = (*Fs)(nil)
-	_ fs.FullObjectInfo = (*ObjectInfo)(nil)
-	_ fs.FullObject     = (*Object)(nil)
+	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
+	_ fs.GetTierer       = (*Object)(nil)
+	_ fs.SetTierer       = (*Object)(nil)
+	_ fs.Object          = (*Object)(nil)
+	_ fs.ObjectUnWrapper = (*Object)(nil)
+	_ fs.IDer            = (*Object)(nil)
+	_ fs.MimeTyper       = (*Object)(nil)
 )

@@ -61,6 +61,5 @@ func TestRemoteGzip(t *testing.T) {
 		{Name: name, Key: "remote", Value: tempdir},
 		{Name: name, Key: "compression_mode", Value: "gzip"},
 		},
-		QuickTestOK: true,
 	})
 }

@@ -96,7 +96,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
 	case "obfuscate":
 		mode = NameEncryptionObfuscated
 	default:
-		err = fmt.Errorf("unknown file name encryption mode %q", s)
+		err = fmt.Errorf("Unknown file name encryption mode %q", s)
 	}
 	return mode, err
 }
@@ -127,11 +127,11 @@ type fileNameEncoding interface {
 // RFC4648
 //
 // The standard encoding is modified in two ways
-//   - it becomes lower case (no-one likes upper case filenames!)
-//   - we strip the padding character `=`
+//  * it becomes lower case (no-one likes upper case filenames!)
+//  * we strip the padding character `=`
 type caseInsensitiveBase32Encoding struct{}
 
-// EncodeToString encodes a string using the modified version of
+// EncodeToString encodes a strign using the modified version of
 // base32 encoding.
 func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
 	encoded := base32.HexEncoding.EncodeToString(src)
@@ -162,7 +162,7 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
 	case "base32768":
 		enc = base32768.SafeEncoding
 	default:
-		err = fmt.Errorf("unknown file name encoding mode %q", s)
+		err = fmt.Errorf("Unknown file name encoding mode %q", s)
 	}
 	return enc, err
 }
@@ -244,7 +244,7 @@ func (c *Cipher) putBlock(buf []byte) {
 
 // encryptSegment encrypts a path segment
 //
-// This uses EME with AES.
+// This uses EME with AES
 //
 // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -254,8 +254,8 @@ func (c *Cipher) putBlock(buf []byte) {
 // same filename must encrypt to the same thing.
 //
 // This means that
-//   - filenames with the same name will encrypt the same
-//   - filenames which start the same won't have a common prefix
+//  * filenames with the same name will encrypt the same
+//  * filenames which start the same won't have a common prefix
 func (c *Cipher) encryptSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
@@ -1085,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 
 // DecryptDataSeek decrypts the data stream from offset
 //
-// The open function must return a ReadCloser opened to the offset supplied.
+// The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
 func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

@@ -28,9 +28,6 @@ func init() {
 		Description: "Encrypt/Decrypt a remote",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
-		MetadataInfo: &fs.MetadataInfo{
-			Help: `Any metadata supported by the underlying remote is read and written.`,
-		},
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -125,7 +122,7 @@ names, or for debugging purposes.`,
 
 This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
|
suitable option would depend on the way your remote count the filename
|
||||||
length and if it's case sensitive.`,
|
length and if it's case sensitve.`,
|
||||||
Default: "base32",
|
Default: "base32",
|
||||||
Examples: []fs.OptionExample{
|
Examples: []fs.OptionExample{
|
||||||
{
|
{
|
||||||
@@ -244,9 +241,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
ReadMetadata: true,
|
|
||||||
WriteMetadata: true,
|
|
||||||
UserMetadata: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
@@ -334,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
|||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
f.addDir(ctx, &newEntries, x)
|
f.addDir(ctx, &newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, fmt.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return newEntries, nil
|
return newEntries, nil
|
||||||
@@ -507,9 +501,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -532,9 +526,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -603,7 +597,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
do := f.Fs.Features().CleanUp
|
do := f.Fs.Features().CleanUp
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return errors.New("not supported by underlying remote")
|
return errors.New("can't CleanUp")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -612,7 +606,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("not supported by underlying remote")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -1062,50 +1056,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTier returns storage tier or class of the Object
|
|
||||||
func (o *ObjectInfo) GetTier() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.GetTierer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.GetTier()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
|
||||||
func (o *ObjectInfo) ID() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.IDer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.ID()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns metadata for an object
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|
||||||
do, ok := o.ObjectInfo.(fs.Metadataer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return do.Metadata(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
|
||||||
// known, or "" if not
|
|
||||||
//
|
|
||||||
// This is deliberately unsupported so we don't leak mime type info by
|
|
||||||
// default.
|
|
||||||
func (o *ObjectInfo) MimeType(ctx context.Context) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Object that this Object is wrapping or
|
|
||||||
// nil if it isn't wrapping anything
|
|
||||||
func (o *ObjectInfo) UnWrap() fs.Object {
|
|
||||||
return fs.UnWrapObjectInfo(o.ObjectInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
// ID returns the ID of the Object if known, or "" if not
|
||||||
func (o *Object) ID() string {
|
func (o *Object) ID() string {
|
||||||
do, ok := o.Object.(fs.IDer)
|
do, ok := o.Object.(fs.IDer)
|
||||||
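Both sides of these hunks delegate optional capabilities through comma-ok type assertions: the wrapper forwards the call if the inner object implements the interface and degrades gracefully if it doesn't. A self-contained sketch of the pattern (the names here are illustrative, not rclone's):

    package main

    import "fmt"

    // IDer is an optional capability an object may implement.
    type IDer interface{ ID() string }

    type wrapped struct{ inner any }

    // ID forwards to the inner object when it implements IDer,
    // and falls back to "" when it does not.
    func (w wrapped) ID() string {
        if do, ok := w.inner.(IDer); ok {
            return do.ID()
        }
        return ""
    }

    type withID struct{}

    func (withID) ID() string { return "abc123" }

    func main() {
        fmt.Println(wrapped{withID{}}.ID()) // abc123
        fmt.Println(wrapped{42}.ID())       // (empty string)
    }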
@@ -1134,26 +1084,6 @@ func (o *Object) GetTier() string {
    return do.GetTier()
 }

-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
-   do, ok := o.Object.(fs.Metadataer)
-   if !ok {
-       return nil, nil
-   }
-   return do.Metadata(ctx)
-}
-
-// MimeType returns the content type of the Object if
-// known, or "" if not
-//
-// This is deliberately unsupported so we don't leak mime type info by
-// default.
-func (o *Object) MimeType(ctx context.Context) string {
-   return ""
-}
-
 // Check the interfaces are satisfied
 var (
    _ fs.Fs = (*Fs)(nil)

@@ -1176,6 +1106,10 @@ var (
    _ fs.UserInfoer      = (*Fs)(nil)
    _ fs.Disconnecter    = (*Fs)(nil)
    _ fs.Shutdowner      = (*Fs)(nil)
-   _ fs.FullObjectInfo  = (*ObjectInfo)(nil)
-   _ fs.FullObject      = (*Object)(nil)
+   _ fs.ObjectInfo      = (*ObjectInfo)(nil)
+   _ fs.Object          = (*Object)(nil)
+   _ fs.ObjectUnWrapper = (*Object)(nil)
+   _ fs.IDer            = (*Object)(nil)
+   _ fs.SetTierer       = (*Object)(nil)
+   _ fs.GetTierer       = (*Object)(nil)
 )
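The `var _ fs.X = (*T)(nil)` block above is the standard Go compile-time interface check: it costs nothing at runtime and turns a missing method into a build error. A minimal standalone version:

    package main

    import "io"

    type countingWriter struct{ n int64 }

    func (w *countingWriter) Write(p []byte) (int, error) {
        w.n += int64(len(p))
        return len(p), nil
    }

    // Fails to compile if *countingWriter ever stops satisfying io.Writer.
    var _ io.Writer = (*countingWriter)(nil)

    func main() {}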
@@ -91,9 +91,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
    src := f.newObjectInfo(oi, nonce)

    // Test ObjectInfo methods
-   if !f.opt.NoDataEncryption {
        assert.Equal(t, int64(outBuf.Len()), src.Size())
-   }
    assert.Equal(t, f, src.Fs())
    assert.NotEqual(t, path, src.Remote())

@@ -4,7 +4,6 @@ package crypt_test
 import (
    "os"
    "path/filepath"
-   "runtime"
    "testing"

    "github.com/rclone/rclone/backend/crypt"

@@ -47,7 +46,6 @@ func TestStandardBase32(t *testing.T) {
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -69,7 +67,6 @@ func TestStandardBase64(t *testing.T) {
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -91,7 +88,6 @@ func TestStandardBase32768(t *testing.T) {
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -113,7 +109,6 @@ func TestOff(t *testing.T) {
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -122,9 +117,6 @@ func TestObfuscate(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
-   if runtime.GOOS == "darwin" {
-       t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
-   }
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name := "TestCrypt3"
    fstests.Run(t, &fstests.Opt{

@@ -139,7 +131,6 @@ func TestObfuscate(t *testing.T) {
        SkipBadWindowsCharacters:     true,
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -148,9 +139,6 @@ func TestNoDataObfuscate(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
-   if runtime.GOOS == "darwin" {
-       t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
-   }
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name := "TestCrypt4"
    fstests.Run(t, &fstests.Opt{

@@ -166,6 +154,5 @@ func TestNoDataObfuscate(t *testing.T) {
        SkipBadWindowsCharacters:     true,
        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-       QuickTestOK:                  true,
    })
 }

@@ -8,11 +8,11 @@ import "errors"

 // Errors Unpad can return
 var (
-   ErrorPaddingNotFound      = errors.New("bad PKCS#7 padding - not padded")
-   ErrorPaddingNotAMultiple  = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
-   ErrorPaddingTooLong       = errors.New("bad PKCS#7 padding - too long")
-   ErrorPaddingTooShort      = errors.New("bad PKCS#7 padding - too short")
-   ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
+   ErrorPaddingNotFound      = errors.New("Bad PKCS#7 padding - not padded")
+   ErrorPaddingNotAMultiple  = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
+   ErrorPaddingTooLong       = errors.New("Bad PKCS#7 padding - too long")
+   ErrorPaddingTooShort      = errors.New("Bad PKCS#7 padding - too short")
+   ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
 )

 // Pad buf using PKCS#7 to a multiple of n.
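For readers unfamiliar with the padding scheme these error values guard, here is a minimal PKCS#7 pad/unpad sketch, written independently of rclone's implementation and covering only a subset of the failure cases named above:

    package main

    import (
        "bytes"
        "errors"
        "fmt"
    )

    // pad appends 1..n bytes, each equal to the pad length,
    // so the result is a whole multiple of n (PKCS#7).
    func pad(n int, buf []byte) []byte {
        padLen := n - len(buf)%n
        return append(buf, bytes.Repeat([]byte{byte(padLen)}, padLen)...)
    }

    // unpad validates and strips the padding added by pad.
    func unpad(n int, buf []byte) ([]byte, error) {
        if len(buf) == 0 || len(buf)%n != 0 {
            return nil, errors.New("bad PKCS#7 padding - not a multiple of blocksize")
        }
        padLen := int(buf[len(buf)-1])
        if padLen == 0 || padLen > n {
            return nil, errors.New("bad PKCS#7 padding - too long")
        }
        for _, b := range buf[len(buf)-padLen:] {
            if int(b) != padLen {
                return nil, errors.New("bad PKCS#7 padding - not all the same")
            }
        }
        return buf[:len(buf)-padLen], nil
    }

    func main() {
        p := pad(16, []byte("hello"))
        out, err := unpad(16, p)
        fmt.Printf("%q %v\n", out, err) // "hello" <nil>
    }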
@@ -18,7 +18,6 @@ import (
    "mime"
    "net/http"
    "path"
-   "regexp"
    "sort"
    "strconv"
    "strings"

@@ -51,7 +50,6 @@ import (
    drive_v2 "google.golang.org/api/drive/v2"
    drive "google.golang.org/api/drive/v3"
    "google.golang.org/api/googleapi"
-   "google.golang.org/api/option"
 )

 // Constants

@@ -72,7 +70,7 @@ const (
    // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
    minChunkSize     = fs.SizeSuffix(googleapi.MinUploadChunkSize)
    defaultChunkSize = 8 * fs.Mebi
-   partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
+   partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
    listRGrouping    = 50   // number of IDs to search at once when using ListR
    listRInputBuffer = 1000 // size of input buffer when using ListR
    defaultXDGIcon   = "text-html"

@@ -278,7 +276,6 @@ Leave blank normally.
 Fill in to access "Computers" folders (see docs), or for rclone to use
 a non root folder as its starting point.
 `,
-           Advanced: true,
        }, {
            Name: "service_account_file",
            Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,

@@ -567,27 +564,6 @@ If this is set then rclone will not show any dangling shortcuts in listings.
 `,
            Advanced: true,
            Default:  false,
-       }, {
-           Name: "resource_key",
-           Help: `Resource key for accessing a link-shared file.
-
-If you need to access files shared with a link like this
-
-    https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
-
-Then you will need to use the first part "XXX" as the "root_folder_id"
-and the second part "YYY" as the "resource_key" otherwise you will get
-404 not found errors when trying to access the directory.
-
-See: https://developers.google.com/drive/api/guides/resource-keys
-
-This resource key requirement only applies to a subset of old files.
-
-Note also that opening the folder once in the web interface (with the
-user you've authenticated rclone with) seems to be enough so that the
-resource key is no needed.
-`,
-           Advanced: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,

@@ -649,7 +625,6 @@ type Options struct {
    StopOnDownloadLimit       bool                 `config:"stop_on_download_limit"`
    SkipShortcuts             bool                 `config:"skip_shortcuts"`
    SkipDanglingShortcuts     bool                 `config:"skip_dangling_shortcuts"`
-   ResourceKey               string               `config:"resource_key"`
    Enc                       encoder.MultiEncoder `config:"encoding"`
 }

@@ -675,7 +650,6 @@ type Fs struct {
    grouping         int32               // number of IDs to search at once in ListR - read with atomic
    listRmu          *sync.Mutex         // protects listRempties
    listRempties     map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
-   dirResourceKeys  *sync.Map           // map directory ID to resource key
 }

 type baseObject struct {

@@ -686,7 +660,6 @@ type baseObject struct {
    mimeType     string   // The object MIME type
    bytes        int64    // size of the object
    parents      []string // IDs of the parent directories
-   resourceKey  *string  // resourceKey is needed for link shared objects
 }
 type documentObject struct {
    baseObject

@@ -758,9 +731,6 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
        } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
            fs.Errorf(f, "Received download limit error: %v", err)
            return false, fserrors.FatalError(err)
-       } else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
-           fs.Errorf(f, "Received upload limit error: %v", err)
-           return false, fserrors.FatalError(err)
        } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
            fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
            return false, fserrors.FatalError(err)

@@ -830,7 +800,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
    // We must not filter with parent when we try list "ROOT" with drive-shared-with-me
    // If we need to list file inside those shared folders, we must search it without sharedWithMe
    parentsQuery := bytes.NewBufferString("(")
-   var resourceKeys []string
    for _, dirID := range dirIDs {
        if dirID == "" {
            continue

@@ -851,12 +820,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
        } else {
            _, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
        }
-       resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
-       if hasResourceKey {
-           resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
-       }
    }
-   resourceKeysHeader := strings.Join(resourceKeys, ",")
    if parentsQuery.Len() > 1 {
        _ = parentsQuery.WriteByte(')')
        query = append(query, parentsQuery.String())

@@ -865,8 +829,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
    if title != "" {
        searchTitle := f.opt.Enc.FromStandardName(title)
        // Escaping the backslash isn't documented but seems to work
-       searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
-       searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)
+       searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
+       searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)

        var titleQuery bytes.Buffer
        _, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
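The two sides of the escaping hunk are behaviourally identical: strings.ReplaceAll(s, old, new), added in Go 1.12, is defined as strings.Replace(s, old, new, -1). A quick self-contained check:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s := `path\with\backslashes`
        // ReplaceAll is the clearer spelling of Replace with n = -1.
        a := strings.ReplaceAll(s, `\`, `\\`)
        b := strings.Replace(s, `\`, `\\`, -1)
        fmt.Println(a == b) // true
    }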
@@ -920,7 +884,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
    }
    list.SupportsAllDrives(true)
    list.IncludeItemsFromAllDrives(true)
-   if f.isTeamDrive && !f.opt.SharedWithMe {
+   if f.isTeamDrive {
        list.DriveId(f.opt.TeamDriveID)
        list.Corpora("drive")
    }

@@ -928,10 +892,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
    if f.rootFolderID == "appDataFolder" {
        list.Spaces("appDataFolder")
    }
-   // Add resource Keys if necessary
-   if resourceKeysHeader != "" {
-       list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
-   }

    fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

@@ -1200,7 +1160,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
        grouping:        listRGrouping,
        listRmu:         new(sync.Mutex),
        listRempties:    make(map[string]struct{}),
-       dirResourceKeys: new(sync.Map),
    }
    f.isTeamDrive = opt.TeamDriveID != ""
    f.fileFields = f.getFileFields()

@@ -1210,18 +1169,17 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
        WriteMimeType:           true,
        CanHaveEmptyDirectories: true,
        ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-       FilterAware:             true,
    }).Fill(ctx, f)

    // Create a new authorized Drive client.
    f.client = oAuthClient
-   f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
+   f.svc, err = drive.New(f.client)
    if err != nil {
        return nil, fmt.Errorf("couldn't create Drive client: %w", err)
    }

    if f.opt.V2DownloadMinSize >= 0 {
-       f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
+       f.v2Svc, err = drive_v2.New(f.client)
        if err != nil {
            return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
        }
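The drive.New(client) constructor that one side of this hunk uses is deprecated in google.golang.org/api; drive.NewService with option.WithHTTPClient is the supported replacement and still routes every request through the supplied HTTP client. A minimal sketch:

    package main

    import (
        "context"
        "log"
        "net/http"

        drive "google.golang.org/api/drive/v3"
        "google.golang.org/api/option"
    )

    func newDriveService(ctx context.Context, client *http.Client) (*drive.Service, error) {
        // The supplied client (an OAuth-authorized one in real use)
        // carries the credentials; NewService only wires it in.
        return drive.NewService(ctx, option.WithHTTPClient(client))
    }

    func main() {
        // http.DefaultClient is a stand-in here; real calls need auth.
        svc, err := newDriveService(context.Background(), http.DefaultClient)
        if err != nil {
            log.Fatal(err)
        }
        _ = svc
    }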
@@ -1263,11 +1221,6 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e

    f.dirCache = dircache.New(f.root, f.rootFolderID, f)

-   // If resource key is set then cache it for the root folder id
-   if f.opt.ResourceKey != "" {
-       f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
-   }
-
    // Parse extensions
    if f.opt.Extensions != "" {
        if f.opt.ExportExtensions != defaultExportExtensions {

@@ -1366,16 +1319,12 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
            }
        }
    }
-   o := &Object{
+   return &Object{
        baseObject: f.newBaseObject(remote, info),
        url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
        md5sum:     strings.ToLower(info.Md5Checksum),
        v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
    }
-   if info.ResourceKey != "" {
-       o.resourceKey = &info.ResourceKey
-   }
-   return o
 }

 // newDocumentObject creates an fs.Object for a google docs drive.File

@@ -2065,7 +2014,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {

 // isShortcutID returns true if compositeID refers to a shortcut
 func isShortcutID(compositeID string) bool {
-   return strings.ContainsRune(compositeID, shortcutSeparator)
+   return strings.IndexRune(compositeID, shortcutSeparator) >= 0
 }

 // actualID returns an actual ID from a composite ID
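As with ReplaceAll above, the two spellings in the isShortcutID hunk are equivalent: strings.ContainsRune (Go 1.7+) is exactly IndexRune(...) >= 0, just more direct. A short check, with an illustrative separator rune (the actual shortcutSeparator value isn't shown in this hunk):

    package main

    import (
        "fmt"
        "strings"
    )

    const sep = '\x1a' // illustrative separator, not rclone's definition

    func main() {
        id := "shortcutID" + string(sep) + "targetID"
        fmt.Println(strings.ContainsRune(id, sep))   // true
        fmt.Println(strings.IndexRune(id, sep) >= 0) // true
    }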
@@ -2136,10 +2085,6 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
    case item.MimeType == driveFolderType:
        // cache the directory ID for later lookups
        f.dirCache.Put(remote, item.Id)
-       // cache the resource key for later lookups
-       if item.ResourceKey != "" {
-           f.dirResourceKeys.Store(item.Id, item.ResourceKey)
-       }
        when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
        d := fs.NewDir(remote, when).SetID(item.Id)
        if len(item.Parents) > 0 {

@@ -2181,7 +2126,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim

 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -2223,10 +2168,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

            exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
            if exportExt == "" {
-               return nil, fmt.Errorf("no export format found for %q", importMimeType)
+               return nil, fmt.Errorf("No export format found for %q", importMimeType)
            }
            if exportExt != srcExt && !f.opt.AllowImportNameChange {
-               return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
+               return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
            }
        }
    }

@@ -2415,9 +2360,9 @@ func (f *Fs) Precision() time.Duration {

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -2484,12 +2429,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

    var info *drive.File
    err = f.pacer.Call(func() (bool, error) {
-       copy := f.svc.Files.Copy(id, createInfo).
+       info, err = f.svc.Files.Copy(id, createInfo).
            Fields(partialFields).
            SupportsAllDrives(true).
-           KeepRevisionForever(f.opt.KeepRevisionForever)
-       srcObj.addResourceKey(copy.Header())
-       info, err = copy.Context(ctx).Do()
+           KeepRevisionForever(f.opt.KeepRevisionForever).
+           Context(ctx).Do()
        return f.shouldRetry(ctx, err)
    })
    if err != nil {

@@ -2531,7 +2475,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // result of List()
 func (f *Fs) Purge(ctx context.Context, dir string) error {
    if f.opt.TrashedOnly {
-       return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
+       return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
    }
    return f.purgeCheck(ctx, dir, false)
 }

@@ -2650,9 +2594,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -2997,12 +2941,12 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
        return fmt.Errorf("drive: failed when making oauth client: %w", err)
    }
    f.client = oAuthClient
-   f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
+   f.svc, err = drive.New(f.client)
    if err != nil {
        return fmt.Errorf("couldn't create Drive client: %w", err)
    }
    if f.opt.V2DownloadMinSize >= 0 {
-       f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
+       f.v2Svc, err = drive_v2.New(f.client)
        if err != nil {
            return fmt.Errorf("couldn't create Drive v2 client: %w", err)
        }

@@ -3291,7 +3235,7 @@ This will return a JSON list of objects like this

 With the -o config parameter it will output the list in a format
 suitable for adding to a config file to make aliases for all the
-drives found and a combined drive.
+drives found.

 [My Drive]
 type = alias

@@ -3301,15 +3245,10 @@ drives found and a combined drive.
 type = alias
 remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

-[AllDrives]
-type = combine
-upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
-
 Adding this to the rclone config file will cause those team drives to
-be accessible with the aliases shown. Any illegal characters will be
-substituted with "_" and duplicate names will have numbers suffixed.
-It will also add a remote called AllDrives which shows all the shared
-drives combined into one directory tree.
+be accessible with the aliases shown. This may require manual editing
+of the names.
 `,
        }, {
            Name:  "untrash",

@@ -3357,12 +3296,6 @@ attempted if possible.

 Use the -i flag to see what would be copied before copying.
 `,
-       }, {
-           Name:  "exportformats",
-           Short: "Dump the export formats for debug purposes",
-       }, {
-           Name:  "importformats",
-           Short: "Dump the import formats for debug purposes",
        }}

 // Command the backend to run a named command

@@ -3382,7 +3315,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
            out["service_account_file"] = f.opt.ServiceAccountFile
        }
        if _, ok := opt["chunk_size"]; ok {
-           out["chunk_size"] = f.opt.ChunkSize.String()
+           out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
        }
        return out, nil
    case "set":

@@ -3399,11 +3332,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
        }
        if chunkSize, ok := opt["chunk_size"]; ok {
            chunkSizeMap := make(map[string]string)
-           chunkSizeMap["previous"] = f.opt.ChunkSize.String()
+           chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
            if err = f.changeChunkSize(chunkSize); err != nil {
                return out, err
            }
-           chunkSizeString := f.opt.ChunkSize.String()
+           chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
            f.m.Set("chunk_size", chunkSizeString)
            chunkSizeMap["current"] = chunkSizeString
            out["chunk_size"] = chunkSizeMap
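The chunk_size hunks swap fmt.Sprintf("%s", x) for a direct x.String() call. When a type already implements fmt.Stringer the two produce the same string, and the direct call skips the formatting round-trip (staticcheck flags the Sprintf form as S1025). A small sketch with an illustrative Stringer, not rclone's fs.SizeSuffix:

    package main

    import "fmt"

    // sizeSuffix stands in for any type with a String method.
    type sizeSuffix int64

    func (s sizeSuffix) String() string { return fmt.Sprintf("%dM", int64(s)>>20) }

    func main() {
        cs := sizeSuffix(8 << 20)
        direct := cs.String()
        viaSprintf := fmt.Sprintf("%s", cs)
        fmt.Println(direct, viaSprintf, direct == viaSprintf) // 8M 8M true
    }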
@@ -3431,30 +3364,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
        if err != nil {
            return nil, err
        }
-       re := regexp.MustCompile(`[^\w_. -]+`)
        if _, ok := opt["config"]; ok {
            lines := []string{}
-           upstreams := []string{}
-           names := make(map[string]struct{}, len(drives))
-           for i, drive := range drives {
-               name := re.ReplaceAllString(drive.Name, "_")
-               for {
-                   if _, found := names[name]; !found {
-                       break
-                   }
-                   name += fmt.Sprintf("-%d", i)
-               }
-               names[name] = struct{}{}
+           for _, drive := range drives {
                lines = append(lines, "")
-               lines = append(lines, fmt.Sprintf("[%s]", name))
-               lines = append(lines, "type = alias")
+               lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
+               lines = append(lines, fmt.Sprintf("type = alias"))
                lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
-               upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
            }
-           lines = append(lines, "")
-           lines = append(lines, "[AllDrives]")
-           lines = append(lines, "type = combine")
-           lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
            return lines, nil
        }
        return drives, nil
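One side of this hunk sanitizes drive names into valid config section names and suffixes duplicates. A self-contained sketch of that sanitize-then-dedupe pattern, mirroring the logic visible in the hunk (names and the demo inputs are illustrative):

    package main

    import (
        "fmt"
        "regexp"
    )

    // Strip characters that are illegal in a config section header,
    // then suffix duplicates with their index until unique.
    var re = regexp.MustCompile(`[^\w_. -]+`)

    func sectionNames(names []string) []string {
        seen := make(map[string]struct{}, len(names))
        out := make([]string, 0, len(names))
        for i, n := range names {
            name := re.ReplaceAllString(n, "_")
            for {
                if _, found := seen[name]; !found {
                    break
                }
                name += fmt.Sprintf("-%d", i)
            }
            seen[name] = struct{}{}
            out = append(out, name)
        }
        return out
    }

    func main() {
        fmt.Println(sectionNames([]string{"My Drive", "My Drive", "Team: A"}))
        // [My Drive My Drive-1 Team_ A]
    }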
@@ -3477,10 +3394,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
            }
        }
        return nil, nil
-   case "exportformats":
-       return f.exportFormats(ctx), nil
-   case "importformats":
-       return f.importFormats(ctx), nil
    default:
        return nil, fs.ErrorCommandNotFound
    }

@@ -3530,6 +3443,12 @@ func (o *baseObject) Size() int64 {
    return o.bytes
 }

+// getRemoteInfo returns a drive.File for the remote
+func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
+   info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
+   return
+}
+
 // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
 func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
    info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {

@@ -3570,6 +3489,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (

 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *baseObject) ModTime(ctx context.Context) time.Time {

@@ -3610,14 +3530,6 @@ func (o *baseObject) Storable() bool {
    return true
 }

-// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
-// object if required.
-func (o *baseObject) addResourceKey(header http.Header) {
-   if o.resourceKey != nil {
-       header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
-   }
-}
-
 // httpResponse gets an http.Response object for the object
 // using the url and method passed in
 func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {

@@ -3633,7 +3545,6 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
        // Don't supply range requests for 0 length objects as they always fail
        delete(req.Header, "Range")
    }
-   o.addResourceKey(req.Header)
    err = o.fs.pacer.Call(func() (bool, error) {
        res, err = o.fs.client.Do(req)
        if err == nil {

@@ -3713,7 +3624,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
            url += "acknowledgeAbuse=true"
            _, res, err = o.httpResponse(ctx, url, "GET", options)
        } else {
-           err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
+           err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
        }
    }
    if err != nil {

@@ -3826,7 +3737,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM

 // Update the already existing object
 //
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -19,7 +19,6 @@ import (
    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/filter"
-   "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/sync"

@@ -29,7 +28,6 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/drive/v3"
-   "google.golang.org/api/googleapi"
 )

 func TestDriveScopes(t *testing.T) {

@@ -192,60 +190,6 @@ func TestExtensionsForImportFormats(t *testing.T) {
    }
 }

-func (f *Fs) InternalTestShouldRetry(t *testing.T) {
-   ctx := context.Background()
-   gatewayTimeout := googleapi.Error{
-       Code: 503,
-   }
-   timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
-   assert.True(t, timeoutRetry)
-   assert.Equal(t, &gatewayTimeout, timeoutError)
-   generic403 := googleapi.Error{
-       Code: 403,
-   }
-   rLEItem := googleapi.ErrorItem{
-       Reason:  "rateLimitExceeded",
-       Message: "User rate limit exceeded.",
-   }
-   generic403.Errors = append(generic403.Errors, rLEItem)
-   oldStopUpload := f.opt.StopOnUploadLimit
-   oldStopDownload := f.opt.StopOnDownloadLimit
-   f.opt.StopOnUploadLimit = true
-   f.opt.StopOnDownloadLimit = true
-   defer func() {
-       f.opt.StopOnUploadLimit = oldStopUpload
-       f.opt.StopOnDownloadLimit = oldStopDownload
-   }()
-   expectedRLError := fserrors.FatalError(&generic403)
-   rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
-   assert.False(t, rateLimitRetry)
-   assert.Equal(t, rateLimitErr, expectedRLError)
-   dQEItem := googleapi.ErrorItem{
-       Reason: "downloadQuotaExceeded",
-   }
-   generic403.Errors[0] = dQEItem
-   expectedDQError := fserrors.FatalError(&generic403)
-   downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
-   assert.False(t, downloadQuotaRetry)
-   assert.Equal(t, downloadQuotaError, expectedDQError)
-   tDFLEItem := googleapi.ErrorItem{
-       Reason: "teamDriveFileLimitExceeded",
-   }
-   generic403.Errors[0] = tDFLEItem
-   expectedTDFLError := fserrors.FatalError(&generic403)
-   teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
-   assert.False(t, teamDriveFileLimitRetry)
-   assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
-   qEItem := googleapi.ErrorItem{
-       Reason: "quotaExceeded",
-   }
-   generic403.Errors[0] = qEItem
-   expectedQuotaError := fserrors.FatalError(&generic403)
-   quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
-   assert.False(t, quotaExceededRetry)
-   assert.Equal(t, quotaExceededError, expectedQuotaError)
-}
-
 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
    oldAllow := f.opt.AllowImportNameChange
    f.opt.AllowImportNameChange = true
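The removed test above exercises how a 403 from Drive is classified by the first error-item reason. For readers who want the mechanics in isolation, a minimal sketch of reading that reason from a googleapi.Error (the classification policy itself is rclone's, not shown here):

    package main

    import (
        "fmt"

        "google.golang.org/api/googleapi"
    )

    // reason pulls the first error-item reason out of a googleapi.Error,
    // which is how Drive distinguishes rate limits from quota errors.
    func reason(err error) string {
        if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 {
            return gerr.Errors[0].Reason
        }
        return ""
    }

    func main() {
        err := &googleapi.Error{
            Code:   403,
            Errors: []googleapi.ErrorItem{{Reason: "rateLimitExceeded"}},
        }
        fmt.Println(err.Code, reason(err)) // 403 rateLimitExceeded
    }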
@@ -434,9 +378,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
    // Make some objects, one in a subdir
    contents := random.String(100)
    file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
-   obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
+   _, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
    file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
-   _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
+   _, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)

    // Check objects
    checkObjects := func() {

@@ -518,9 +462,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {

 // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
 func (f *Fs) InternalTestAgeQuery(t *testing.T) {
-   // Check set up for filtering
-   assert.True(t, f.Features().FilterAware)
-
    opt := &filter.Opt{}
    err := opt.MaxAge.Set("1h")
    assert.NoError(t, err)

@@ -555,7 +496,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
    require.NoError(t, err)

    file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
-   _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
+   _, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)

    // validate sync/copy
    const timeQuery = "(modifiedTime >= '"

@@ -604,7 +545,6 @@ func (f *Fs) InternalTest(t *testing.T) {
    t.Run("UnTrash", f.InternalTestUnTrash)
    t.Run("CopyID", f.InternalTestCopyID)
    t.Run("AgeQuery", f.InternalTestAgeQuery)
-   t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }

 var _ fstests.InternalTester = (*Fs)(nil)
@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
 }

 // finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
+func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
    var arg = &files.UploadSessionFinishBatchArg{
        Entries: items,
    }
    err = b.f.pacer.Call(func() (bool, error) {
-       complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
+       batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
        // If error is insufficient space then don't retry
        if e, ok := err.(files.UploadSessionFinishAPIError); ok {
            if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {

@@ -137,7 +137,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
    if err != nil {
        return nil, fmt.Errorf("batch commit failed: %w", err)
    }
-   return complete, nil
+   return batchStatus, nil
 }

 // finishBatchJobStatus waits for the batch to complete returning completed entries

@@ -199,11 +199,26 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
    fs.Debugf(b.f, "Committing %s", desc)

    // finalise the batch getting either a result or a job id to poll
-   complete, err := b.finishBatch(ctx, items)
+   batchStatus, err := b.finishBatch(ctx, items)
    if err != nil {
        return err
    }

+   // check whether batch is complete
+   var complete *files.UploadSessionFinishBatchResult
+   switch batchStatus.Tag {
+   case "async_job_id":
+       // wait for batch to complete
+       complete, err = b.finishBatchJobStatus(ctx, batchStatus)
+       if err != nil {
+           return err
+       }
+   case "complete":
+       complete = batchStatus.Complete
+   default:
+       return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
+   }
+
    // Check we got the right number of entries
    entries := complete.Entries
    if len(entries) != len(results) {
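One side of this hunk dispatches on a Dropbox-style tagged union: a Tag string selects whether the result is inline or must be polled as an async job. A self-contained sketch of that shape and dispatch, using illustrative types rather than the SDK's definitions (only the Tag/Complete usage is taken from the hunk itself):

    package main

    import "fmt"

    type result struct{ entries []string }

    // launch mirrors the shape of a tagged union: Tag selects
    // which of the other fields is meaningful.
    type launch struct {
        Tag        string
        AsyncJobID string
        Complete   *result
    }

    func resolve(l launch, wait func(jobID string) (*result, error)) (*result, error) {
        switch l.Tag {
        case "async_job_id":
            return wait(l.AsyncJobID) // poll until the server-side job finishes
        case "complete":
            return l.Complete, nil // finished synchronously
        default:
            return nil, fmt.Errorf("unknown status %q", l.Tag)
        }
    }

    func main() {
        r, err := resolve(launch{Tag: "complete", Complete: &result{entries: []string{"a"}}}, nil)
        fmt.Println(r.entries, err) // [a] <nil>
        _, err = resolve(launch{Tag: "other"}, nil)
        fmt.Println(err) // unknown status "other"
    }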
@@ -304,12 +319,9 @@ outer:
 //
 // Can be called from atexit handler
 func (b *batcher) Shutdown() {
-	if !b.Batching() {
-		return
-	}
 	b.shutOnce.Do(func() {
 		atexit.Unregister(b.atexit)
-		fs.Infof(b.f, "Committing uploads - please wait...")
+		fs.Infof(b.f, "Commiting uploads - please wait...")
 		// show that batcher is shutting down
 		close(b.closed)
 		// quit the commitLoop by sending a quitRequest message

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
 			Advanced: true,
 		}, {
 			Name:     "batch_commit_timeout",
-			Help:     `Max time to wait for a batch to finish committing`,
+			Help:     `Max time to wait for a batch to finish comitting`,
 			Default:  fs.Duration(10 * time.Minute),
 			Advanced: true,
 		}, {

@@ -472,12 +472,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		args := team.NewMembersGetInfoArgs(members)
 
 		memberIds, err := f.team.MembersGetInfo(args)
+
 		if err != nil {
 			return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
 		}
-		if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
-			return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
-		}
 
 		cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
 	}

@@ -925,7 +923,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -1044,9 +1042,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1105,9 +1103,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1199,7 +1197,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return
 	}
 	if len(listRes.Links) == 0 {
-		err = errors.New("sharing link already exists, but list came back empty")
+		err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
 		return
 	}
 	linkRes = listRes.Links[0]

@@ -1211,7 +1209,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		case *sharing.FolderLinkMetadata:
 			link = res.Url
 		default:
-			err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
+			err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
 		}
 	}
 	return

@@ -1271,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("about failed: %w", err)
 	}
 	var total uint64
 	if q.Allocation != nil {

@@ -1372,12 +1370,10 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 
 	if timeout < 30 {
 		timeout = 30
-		fs.Debugf(f, "Increasing poll interval to minimum 30s")
 	}
 
 	if timeout > 480 {
 		timeout = 480
-		fs.Debugf(f, "Decreasing poll interval to maximum 480s")
 	}
 
 	err = f.pacer.Call(func() (bool, error) {

@@ -1435,7 +1431,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 			}
 
 			if entryPath != "" {
-				notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
+				notifyFunc(entryPath, entryType)
 			}
 		}
 		if !changeList.HasMore {

@@ -1669,7 +1665,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 			correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
 			delta := int64(correctOffset) - int64(cursor.Offset)
 			skip += delta
-			what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
+			what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
 			if skip < 0 {
 				return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
 			} else if skip == chunkSize {

@@ -1697,9 +1693,6 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 		if size > 0 {
 			// if size is known, check if next chunk is final
 			appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
-			if in.BytesRead() > uint64(size) {
-				return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
-			}
 		} else {
 			// if size is unknown, upload as long as we can read full chunks from the reader
 			appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
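The Close flag above marks the chunk that finishes the Dropbox upload session. A minimal restatement of the two branches (same arithmetic as the hunk, pulled into a standalone helper purely for illustration):

// isFinalChunk reports whether the next chunk should close the session.
// size < 0 means the total size is unknown up front.
func isFinalChunk(size int64, bytesRead, offset, chunkSize uint64) bool {
	if size > 0 {
		// known size: close once the unread remainder fits in one chunk
		return uint64(size)-bytesRead <= chunkSize
	}
	// unknown size: a short read means the reader is exhausted
	return bytesRead-offset < chunkSize
}

For example, with size = 10 MiB, chunkSize = 4 MiB and bytesRead = 8 MiB, the remainder is 2 MiB, which fits in one chunk, so the next append is sent with Close set.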
@@ -1763,7 +1756,7 @@ func checkPathLength(name string) (err error) {
 
 // Update the already existing object
 //
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -28,44 +28,25 @@ var retryErrorCodes = []int{
 	509, // Bandwidth Limit Exceeded
 }
 
-var errorRegex = regexp.MustCompile(`#\d{1,3}`)
-
-func parseFichierError(err error) int {
-	matches := errorRegex.FindStringSubmatch(err.Error())
-	if len(matches) == 0 {
-		return 0
-	}
-	code, err := strconv.Atoi(matches[0])
-	if err != nil {
-		fs.Debugf(nil, "failed parsing fichier error: %v", err)
-		return 0
-	}
-	return code
-}
-
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
-	// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
+	// Detect this error which the integration tests provoke
+	// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
 	//
-	// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
-	// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
-	// The list below is far from complete and should be expanded if we see any more error codes.
-	if err != nil {
-		switch parseFichierError(err) {
-		case 93:
-			return false, err // No such user
-		case 186:
-			return false, err // IP blocked?
-		case 374:
+	// https://1fichier.com/api.html
+	//
+	// file/ls.cgi is limited :
+	//
+	// Warning (can be changed in case of abuses) :
+	// List all files of the account is limited to 1 request per hour.
+	// List folders is limited to 5 000 results and 1 request per folder per 30s.
+	if err != nil && strings.Contains(err.Error(), "Flood detected") {
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
-		default:
-		}
 	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
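Both sides key off the "Flood detected" body that 1Fichier returns with HTTP 403; the base side generalises this by parsing the numeric code out of the message. A standalone sketch of just the matching step (invented input string; note that the pattern as printed here matches the "#" as well, so the digits would still need isolating before strconv.Atoi):

package main

import (
	"fmt"
	"regexp"
)

// same pattern as the errorRegex in the hunk above
var errorRegex = regexp.MustCompile(`#\d{1,3}`)

func main() {
	body := `{"message":"Flood detected: IP Locked #374","status":"KO"}`
	fmt.Println(errorRegex.FindString(body)) // prints "#374"
}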
@@ -487,7 +468,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
 	fileName = f.opt.Enc.FromStandardName(fileName)
 
 	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-		return nil, errors.New("invalid UploadID")
+		return nil, errors.New("Invalid UploadID")
 	}
 
 	opts := rest.Opts{

@@ -529,7 +510,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
 	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
 
 	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-		return nil, errors.New("invalid UploadID")
+		return nil, errors.New("Invalid UploadID")
 	}
 
 	opts := rest.Opts{

@@ -1,4 +1,3 @@
-// Package fichier provides an interface to the 1Fichier storage system.
 package fichier
 
 import (

@@ -295,7 +294,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	path, ok := f.dirCache.GetInv(directoryID)
 
 	if !ok {
-		return nil, errors.New("cannot find dir in dircache")
+		return nil, errors.New("Cannot find dir in dircache")
 	}
 
 	return f.newObjectFromFile(ctx, path, file), nil

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
 	URLs []FileCopy `json:"urls"`
 }
 
-// FileCopy is used in the CopyFileResponse
+// FileCopy is used in the the CopyFileResponse
 type FileCopy struct {
 	FromURL string `json:"from_url"`
 	ToURL   string `json:"to_url"`

@@ -19,7 +19,7 @@ const (
 	timeFormatJSON = `"` + timeFormatParameters + `"`
 )
 
-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // filefabric API
 type Time time.Time
 

@@ -95,7 +95,7 @@ type Status struct {
 	// Warning string `json:"warning"` // obsolete
 }
 
-// Status satisfies the error interface
+// Status statisfies the error interface
 func (e *Status) Error() string {
 	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
 }

@@ -150,7 +150,7 @@ type Fs struct {
 	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	m        configmap.Mapper   // to save config
-	srv      *rest.Client       // the connection to the server
+	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
 	pacer    *fs.Pacer          // pacer for API calls
 	tokenMu  sync.Mutex         // hold when reading the token

@@ -373,7 +373,7 @@ type params map[string]interface{}
 
 // rpc calls the rpc.php method of the SME file fabric
 //
-// This is an entry point to all the method calls.
+// This is an entry point to all the method calls
 //
 // If result is nil then resp.Body will need closing
 func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {

@@ -490,7 +490,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			// Root is a dir - cache its ID
 			f.dirCache.Put(f.root, info.ID)
 		}
-		//} else {
+		} else {
 			// Root is not found so a directory
 		}
 	}

@@ -678,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
-// Returns the object, leaf, directoryID and error.
+// Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {

@@ -697,7 +697,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -783,9 +783,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.purgeCheck(ctx, dir, false)
 }
 
-// Wait for the background task to complete if necessary
+// Wait for the the background task to complete if necessary
 func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
 	if taskID == "" || taskID == "0" {
 		// No task to wait for

@@ -956,9 +956,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
 
 // Move src to this remote using server side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1135,6 +1135,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -1200,7 +1201,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -45,7 +45,7 @@ const (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "ftp",
-		Description: "FTP",
+		Description: "FTP Connection",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "host",

@@ -82,21 +82,7 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
 			Default: false,
 		}, {
 			Name: "concurrency",
-			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
-
-Note that setting this is very likely to cause deadlocks so it should
-be used with care.
-
-If you are doing a sync or copy then make sure concurrency is one more
-than the sum of |--transfers| and |--checkers|.
-
-If you use |--check-first| then it just needs to be one more than the
-maximum of |--checkers| and |--transfers|.
-
-So for |concurrency 3| you'd use |--checkers 2 --transfers 2
---check-first| or |--checkers 1 --transfers 1|.
-
-`, "|", "`", -1),
+			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited.",
 			Default:  0,
 			Advanced: true,
 		}, {
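The removed help text encodes a concrete sizing rule for the FTP connection pool. Restated as a hedged helper (the arithmetic comes straight from the text above; the function name is invented for illustration):

// requiredFTPConcurrency returns the smallest safe value for the
// concurrency option given rclone's --transfers and --checkers settings.
func requiredFTPConcurrency(transfers, checkers int, checkFirst bool) int {
	if checkFirst {
		// with --check-first the checking and transferring phases do not
		// overlap, so only the larger of the two pools matters
		if transfers > checkers {
			return transfers + 1
		}
		return checkers + 1
	}
	// otherwise both pools can hold connections at the same time
	return transfers + checkers + 1
}

So requiredFTPConcurrency(2, 2, true) == 3, matching the "--checkers 2 --transfers 2 --check-first" example in the removed text.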
@@ -114,21 +100,11 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
 			Help:     "Disable using MLSD even if server advertises support.",
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name:     "disable_utf8",
-			Help:     "Disable using UTF-8 even if server advertises support.",
-			Default:  false,
-			Advanced: true,
 		}, {
 			Name:     "writing_mdtm",
 			Help:     "Use MDTM to set modification time (VsFtpd quirk)",
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name:     "force_list_hidden",
-			Help:     "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
-			Default:  false,
-			Advanced: true,
 		}, {
 			Name:    "idle_timeout",
 			Default: fs.Duration(60 * time.Second),

@@ -208,9 +184,7 @@ type Options struct {
 	SkipVerifyTLSCert bool        `config:"no_check_certificate"`
 	DisableEPSV       bool        `config:"disable_epsv"`
 	DisableMLSD       bool        `config:"disable_mlsd"`
-	DisableUTF8       bool        `config:"disable_utf8"`
 	WritingMDTM       bool        `config:"writing_mdtm"`
-	ForceListHidden   bool        `config:"force_list_hidden"`
 	IdleTimeout       fs.Duration `config:"idle_timeout"`
 	CloseTimeout      fs.Duration `config:"close_timeout"`
 	ShutTimeout       fs.Duration `config:"shut_timeout"`

@@ -336,44 +310,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	fs.Debugf(f, "Connecting to FTP server")
 
 	// Make ftp library dial with fshttp dialer optionally using TLS
-	initialConnection := true
 	dial := func(network, address string) (conn net.Conn, err error) {
-		fs.Debugf(f, "dial(%q,%q)", network, address)
-		defer func() {
-			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
-		}()
 		conn, err = fshttp.NewDialer(ctx).Dial(network, address)
-		if err != nil {
-			return nil, err
+		if f.tlsConf != nil && err == nil {
+			conn = tls.Client(conn, f.tlsConf)
 		}
-		// Connect using cleartext only for non TLS
-		if f.tlsConf == nil {
-			return conn, nil
-		}
-		// Initial connection only needs to be cleartext for explicit TLS
-		if f.opt.ExplicitTLS && initialConnection {
-			initialConnection = false
-			return conn, nil
-		}
-		// Upgrade connection to TLS
-		tlsConn := tls.Client(conn, f.tlsConf)
-		// Do the initial handshake - tls.Client doesn't do it for us
-		// If we do this then connections to proftpd/pureftpd lock up
-		// See: https://github.com/rclone/rclone/issues/6426
-		// See: https://github.com/jlaffaye/ftp/issues/282
-		if false {
-			err = tlsConn.HandshakeContext(ctx)
-			if err != nil {
-				_ = conn.Close()
-				return nil, err
-			}
-		}
-		return tlsConn, nil
-	}
-	ftpConfig := []ftp.DialOption{
-		ftp.DialWithContext(ctx),
-		ftp.DialWithDialFunc(dial),
+		return
 	}
+	ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
 
 	if f.opt.TLS {
 		// Our dialer takes care of TLS but ftp library also needs tlsConf

@@ -381,6 +325,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 		ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
 	} else if f.opt.ExplicitTLS {
 		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
+		// Initial connection needs to be cleartext for explicit TLS
+		conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
+		if err != nil {
+			return nil, err
+		}
+		ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
 	}
 	if f.opt.DisableEPSV {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
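The hunks above and below circle one FTPS subtlety: with implicit TLS every byte is encrypted from the first packet, while with explicit TLS the control connection must start in cleartext and only upgrades after AUTH TLS, which is why the head branch hands the library a pre-dialled cleartext net.Conn. A minimal standalone sketch of the distinction (hypothetical helper, not rclone code; error handling trimmed):

// connectControl opens an FTP control connection for either TLS mode.
func connectControl(addr string, tlsConf *tls.Config, explicit bool) (net.Conn, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	if tlsConf != nil && !explicit {
		// implicit FTPS: wrap the raw connection immediately
		return tls.Client(conn, tlsConf), nil
	}
	// explicit FTPS (or plain FTP): stay cleartext for now; with explicit
	// TLS the client upgrades in-band after sending AUTH TLS
	return conn, nil
}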
@@ -388,18 +338,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	if f.opt.DisableMLSD {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
 	}
-	if f.opt.DisableUTF8 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
-	}
 	if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
 		ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
 	}
 	if f.opt.WritingMDTM {
 		ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
 	}
-	if f.opt.ForceListHidden {
-		ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
-	}
 	if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
 		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
 	}

@@ -534,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		protocol = "ftps://"
 	}
 	if opt.TLS && opt.ExplicitTLS {
-		return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
+		return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
 	}
 	var tlsConfig *tls.Config
 	if opt.TLS || opt.ExplicitTLS {

@@ -765,7 +709,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	case <-timer.C:
 		// if timer fired assume no error but connection dead
 		fs.Errorf(f, "Timeout when waiting for List")
-		return nil, errors.New("timeout when waiting for List")
+		return nil, errors.New("Timeout when waiting for List")
 	}
 
 	// Annoyingly FTP returns success for a directory which

@@ -821,7 +765,6 @@ func (f *Fs) Hashes() hash.Set {
 //   - accepts the MFMT command to set file time (fSetTime)
 //     or non-standard form of the MDTM command (fSetTime, too)
 //     used by VsFtpd for the same purpose (WritingMDTM)
-//
 // See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
 func (f *Fs) Precision() time.Duration {
 	if (f.fGetTime || f.fLstTime) && f.fSetTime {

@@ -1197,7 +1140,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 
 // Update the already existing object
 //
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -35,7 +35,7 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 func (f *Fs) testUploadTimeout(t *testing.T) {
 	const (
 		fileSize    = 100000000 // 100 MiB
-		idleTimeout = 1 * time.Second // small because test server is local
+		idleTimeout = 40 * time.Millisecond // small because test server is local
 		maxTime     = 10 * time.Second // prevent test hangup
 	)
 

@@ -24,7 +24,6 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/rclone/rclone/fs"

@@ -44,7 +43,6 @@ import (
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/googleapi"
-	option "google.golang.org/api/option"
 
 	// NOTE: This API is deprecated
 	storage "google.golang.org/api/storage/v1"

@@ -306,23 +304,6 @@ rclone does if you know the bucket exists already.
 `,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name: "decompress",
-			Help: `If set this will decompress gzip encoded objects.
-
-It is possible to upload objects to GCS with "Content-Encoding: gzip"
-set. Normally rclone will download these files as compressed objects.
-
-If this flag is set then rclone will decompress these files with
-"Content-Encoding: gzip" as they are received. This means that rclone
-can't check the size and hash but the file contents will be decompressed.
-`,
-			Advanced: true,
-			Default:  false,
-		}, {
-			Name:     "endpoint",
-			Help:     "Endpoint for the service.\n\nLeave blank normally.",
-			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,

@@ -346,8 +327,6 @@ type Options struct {
 	Location      string               `config:"location"`
 	StorageClass  string               `config:"storage_class"`
 	NoCheckBucket bool                 `config:"no_check_bucket"`
-	Decompress    bool                 `config:"decompress"`
-	Endpoint      string               `config:"endpoint"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
 }
 

@@ -363,7 +342,6 @@ type Fs struct {
 	rootDirectory  string        // directory part of root (if any)
 	cache          *bucket.Cache // cache of bucket status
 	pacer          *fs.Pacer     // To pace the API calls
-	warnCompressed sync.Once     // warn once about compressed files
 }
 
 // Object describes a storage object

@@ -377,7 +355,6 @@ type Object struct {
 	bytes    int64     // Bytes in the object
 	modTime  time.Time // Modified time of the object
 	mimeType string
-	gzipped  bool // set if object has Content-Encoding: gzip
 }
 
 // ------------------------------------------------------------

@@ -395,7 +372,7 @@ func (f *Fs) Root() string {
 // String converts this Fs to a string
 func (f *Fs) String() string {
 	if f.rootBucket == "" {
-		return "GCS root"
+		return fmt.Sprintf("GCS root")
 	}
 	if f.rootDirectory == "" {
 		return fmt.Sprintf("GCS bucket %s", f.rootBucket)

@@ -528,11 +505,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 
 	// Create a new authorized Drive client.
 	f.client = oAuthClient
-	gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
-	if opt.Endpoint != "" {
-		gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
-	}
-	f.svc, err = storage.NewService(context.Background(), gcsOpts...)
+	f.svc, err = storage.New(f.client)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
 	}

@@ -589,7 +562,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
 //
 // dir is the starting directory, "" for root
 //
-// Set recurse to read sub directories.
+// Set recurse to read sub directories
 //
 // The remote has prefix removed from it and if addBucket is set
 // then it adds the bucket to the start.

@@ -807,7 +780,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 
 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -909,9 +882,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1002,7 +975,6 @@ func (o *Object) setMetaData(info *storage.Object) {
 	o.url = info.MediaLink
 	o.bytes = int64(info.Size)
 	o.mimeType = info.ContentType
-	o.gzipped = info.ContentEncoding == "gzip"
 
 	// Read md5sum
 	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)

@@ -1041,12 +1013,6 @@ func (o *Object) setMetaData(info *storage.Object) {
 	} else {
 		o.modTime = modTime
 	}
-
-	// If gunzipping then size and md5sum are unknown
-	if o.gzipped && o.fs.opt.Decompress {
-		o.bytes = -1
-		o.md5sum = ""
-	}
 }
 
 // readObjectInfo reads the definition for an object

@@ -1147,18 +1113,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}
 	fs.FixRangeOption(options, o.bytes)
-	if o.gzipped && !o.fs.opt.Decompress {
-		// Allow files which are stored on the cloud storage system
-		// compressed to be downloaded without being decompressed. Note
-		// that setting this here overrides the automatic decompression
-		// in the Transport.
-		//
-		// See: https://cloud.google.com/storage/docs/transcoding
-		req.Header.Set("Accept-Encoding", "gzip")
-		o.fs.warnCompressed.Do(func() {
-			fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
-		})
-	}
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	var res *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
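The removed branch exists because of GCS transcoding: objects stored with Content-Encoding: gzip are decompressed in transit by default, which breaks size and hash checks. Re-requesting the compressed bytes avoids that. The header decision, restated as a small helper (same logic as the hunk; see https://cloud.google.com/storage/docs/transcoding):

// setDownloadHeaders asks GCS for the stored (still gzipped) bytes when the
// object is compressed and the user has not opted in to decompression.
// Setting Accept-Encoding explicitly also stops Go's http.Transport from
// gunzipping the body behind our back.
func setDownloadHeaders(req *http.Request, gzipped, decompress bool) {
	if gzipped && !decompress {
		req.Header.Set("Accept-Encoding", "gzip")
	}
}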
@@ -1,4 +1,3 @@
-// Package api provides types used by the Google Photos API.
 package api
 
 import (

@@ -178,7 +178,7 @@ type Fs struct {
 	opt       Options                 // parsed options
 	features  *fs.Features            // optional features
 	unAuth    *rest.Client            // unauthenticated http client
-	srv       *rest.Client            // the connection to the server
+	srv       *rest.Client            // the connection to the one drive server
 	ts        *oauthutil.TokenSource  // token source for oauth2
 	pacer     *fs.Pacer               // To pace the API calls
 	startTime time.Time               // time Fs was started - used for datestamps

@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
 		for i := range items {
 			item := &result.MediaItems[i]
 			remote := item.Filename
-			remote = strings.ReplaceAll(remote, "/", "／")
+			remote = strings.Replace(remote, "/", "／", -1)
 			err = fn(remote, item, false)
 			if err != nil {
 				return err

@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -37,7 +37,7 @@ func TestIntegration(t *testing.T) {
 	}
 	f, err := fs.NewFs(ctx, *fstest.RemoteName)
 	if err == fs.ErrorNotFoundInConfigFile {
-		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
+		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
 	}
 	require.NoError(t, err)
 

@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
 
 // featureFilter creates a filter for the Feature enum
 //
-// The API only supports one feature, FAVORITES, so hardcode that feature.
+// The API only supports one feature, FAVORITES, so hardcode that feature
 //
 // https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
 func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
 
 // mock listUploads for testing
 func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	entries = f.uploaded[dir]
+	entries, _ = f.uploaded[dir]
 	return entries, nil
 }
 

@@ -27,9 +27,6 @@ func init() {
 		Name:        "hasher",
 		Description: "Better checksums for other remotes",
 		NewFs:       NewFs,
-		MetadataInfo: &fs.MetadataInfo{
-			Help: `Any metadata supported by the underlying remote is read and written.`,
-		},
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",

@@ -161,11 +158,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
 		IsLocal:       true,
 		ReadMimeType:  true,
 		WriteMimeType: true,
-		SetTier:       true,
-		GetTier:       true,
-		ReadMetadata:  true,
-		WriteMetadata: true,
-		UserMetadata:  true,
 	}
 	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
 

@@ -290,7 +282,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	if do := f.Fs.Features().CleanUp; do != nil {
 		return do(ctx)
 	}
-	return errors.New("not supported by underlying remote")
+	return errors.New("CleanUp not supported")
 }
 
 // About gets quota information from the Fs

@@ -298,7 +290,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	if do := f.Fs.Features().About; do != nil {
 		return do(ctx)
 	}
-	return nil, errors.New("not supported by underlying remote")
+	return nil, errors.New("About not supported")
 }
 
 // ChangeNotify calls the passed function with a path that has had changes.

@@ -493,17 +485,6 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return ""
 }
 
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
-	do, ok := o.Object.(fs.Metadataer)
-	if !ok {
-		return nil, nil
-	}
-	return do.Metadata(ctx)
-}
-
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)

@@ -526,5 +507,10 @@ var (
 	_ fs.UserInfoer   = (*Fs)(nil)
 	_ fs.Disconnecter = (*Fs)(nil)
 	_ fs.Shutdowner   = (*Fs)(nil)
-	_ fs.FullObject   = (*Object)(nil)
+	_ fs.Object          = (*Object)(nil)
+	_ fs.ObjectUnWrapper = (*Object)(nil)
+	_ fs.IDer            = (*Object)(nil)
+	_ fs.SetTierer       = (*Object)(nil)
+	_ fs.GetTierer       = (*Object)(nil)
+	_ fs.MimeTyper       = (*Object)(nil)
 )

@@ -19,7 +19,7 @@ import (
 func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
 	mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
 	item := fstest.Item{Path: name, ModTime: mtime1}
-	o := fstests.PutTestContents(ctx, t, f, &item, data, true)
+	_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
 	require.NotNil(t, o)
 	return o
 }

@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
 	// make a temporary crypt remote
 	ctx := context.Background()
 	pass := obscure.MustObscure("crypt")
-	remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
+	remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
 	cryptFs, err := fs.NewFs(ctx, remote)
 	require.NoError(t, err)
 

@@ -33,7 +33,6 @@ func TestIntegration(t *testing.T) {
 			{Name: "TestHasher", Key: "remote", Value: tempDir},
 		}
 		opt.RemoteName = "TestHasher:"
-		opt.QuickTestOK = true
 	}
 	fstests.Run(t, &opt)
 }

@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.ServicePrincipalName != "" {
 		options.KerberosClient, err = getKerberosClient()
 		if err != nil {
-			return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
+			return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
 		}
 		options.KerberosServicePrincipleName = opt.ServicePrincipalName
 

@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1,7 +1,6 @@
 //go:build !plan9
 // +build !plan9
 
-// Package hdfs provides an interface to the HDFS storage system.
 package hdfs
 
 import (

@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	_, err = o.fs.client.Stat(realpath)
+	info, err := o.fs.client.Stat(realpath)
 	if err == nil {
 		err = o.fs.client.Remove(realpath)
 		if err != nil {

@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	info, err := o.fs.client.Stat(realpath)
+	info, err = o.fs.client.Stat(realpath)
 	if err != nil {
 		return err
 	}

@@ -1,81 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"net/url"
-	"path"
-	"strings"
-	"time"
-)
-
-// Some presets for different amounts of information that can be requested for fields;
-// it is recommended to only request the information that is actually needed.
-var (
-	HiDriveObjectNoMetadataFields            = []string{"name", "type"}
-	HiDriveObjectWithMetadataFields          = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
-	HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
-	DirectoryContentFields                   = []string{"nmembers"}
-)
-
-// QueryParameters represents the parameters passed to an API-call.
-type QueryParameters struct {
-	url.Values
-}
-
-// NewQueryParameters initializes an instance of QueryParameters and
-// returns a pointer to it.
-func NewQueryParameters() *QueryParameters {
-	return &QueryParameters{url.Values{}}
-}
-
-// SetFileInDirectory sets the appropriate parameters
-// to specify a path to a file in a directory.
-// This is used by requests that work with paths for files that do not exist yet.
-// (For example when creating a file).
-// Most requests use the format produced by SetPath(...).
-func (p *QueryParameters) SetFileInDirectory(filePath string) {
-	directory, file := path.Split(path.Clean(filePath))
-	p.Set("dir", path.Clean(directory))
-	p.Set("name", file)
-	// NOTE: It would be possible to switch to pid-based requests
-	// by modifying this function.
-}
-
-// SetPath sets the appropriate parameters to access the given path.
-func (p *QueryParameters) SetPath(objectPath string) {
-	p.Set("path", path.Clean(objectPath))
-	// NOTE: It would be possible to switch to pid-based requests
-	// by modifying this function.
-}
-
-// SetTime sets the key to the time-value. It replaces any existing values.
-func (p *QueryParameters) SetTime(key string, value time.Time) error {
-	valueAPI := Time(value)
-	valueBytes, err := json.Marshal(&valueAPI)
-	if err != nil {
-		return err
-	}
-	p.Set(key, string(valueBytes))
-	return nil
-}
-
-// AddList adds the given values as a list
-// with each value separated by the separator.
-// It appends to any existing values associated with key.
-func (p *QueryParameters) AddList(key string, separator string, values ...string) {
-	original := p.Get(key)
-	p.Set(key, strings.Join(values, separator))
-	if original != "" {
-		p.Set(key, original+separator+p.Get(key))
-	}
-}
-
-// AddFields sets the appropriate parameter to access the given fields.
-// The given fields will be appended to any other existing fields.
-func (p *QueryParameters) AddFields(prefix string, fields ...string) {
-	modifiedFields := make([]string, len(fields))
-	for i, field := range fields {
-		modifiedFields[i] = prefix + field
-	}
-	p.AddList("fields", ",", modifiedFields...)
-}
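The deleted file above (present in v1.60.1, absent on the compared branch) is a small builder over url.Values for HiDrive request parameters. A short usage sketch of that API as listed in the `-` lines (path and field choices invented for illustration):

p := api.NewQueryParameters()
p.SetPath("/users/example/photos") // sets path=/users/example/photos
p.AddFields("", api.HiDriveObjectWithMetadataFields...)
p.AddFields("members.", api.DirectoryContentFields...) // appends members.nmembers to fields
query := p.Encode() // Encode comes from the embedded url.Values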
@@ -1,135 +0,0 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api

import (
	"encoding/json"
	"fmt"
	"net/url"
	"strconv"
	"time"
)

// Time represents date and time information for the API.
type Time time.Time

// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
	secs := time.Time(*t).Unix()
	return []byte(strconv.FormatInt(secs, 10)), nil
}

// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	secs, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.Unix(secs, 0))
	return nil
}
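
// Editor's note: a minimal round-trip sketch for api.Time (timestamp value
// is illustrative); the API exchanges times as plain Unix seconds:
//
//	t := api.Time(time.Unix(1650000000, 0))
//	b, _ := json.Marshal(&t) // b == []byte("1650000000")
//	var parsed api.Time
//	_ = json.Unmarshal(b, &parsed) // time.Time(parsed).Unix() == 1650000000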

// Error is returned from the API when things go wrong.
type Error struct {
	Code        json.Number `json:"code"`
	ContextInfo json.RawMessage
	Message     string `json:"msg"`
}

// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Code.String())
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface.
var _ error = (*Error)(nil)

// possible types for HiDriveObject
const (
	HiDriveObjectTypeDirectory = "dir"
	HiDriveObjectTypeFile      = "file"
	HiDriveObjectTypeSymlink   = "symlink"
)

// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
	Type         string `json:"type"`
	ID           string `json:"id"`
	ParentID     string `json:"parent_id"`
	Name         string `json:"name"`
	Path         string `json:"path"`
	Size         int64  `json:"size"`
	MemberCount  int64  `json:"nmembers"`
	ModifiedAt   Time   `json:"mtime"`
	ChangedAt    Time   `json:"ctime"`
	MetaHash     string `json:"mhash"`
	MetaOnlyHash string `json:"mohash"`
	NameHash     string `json:"nhash"`
	ContentHash  string `json:"chash"`
	IsTeamfolder bool   `json:"teamfolder"`
	Readable     bool   `json:"readable"`
	Writable     bool   `json:"writable"`
	Shareable    bool   `json:"shareable"`
	MIMEType     string `json:"mime_type"`
}

// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
	t := time.Time(i.ModifiedAt)
	if t.IsZero() {
		t = time.Time(i.ChangedAt)
	}
	return t
}

// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
	type objectAlias HiDriveObject
	defaultObject := objectAlias{
		Size:        -1,
		MemberCount: -1,
	}

	err := json.Unmarshal(data, &defaultObject)
	if err != nil {
		return err
	}
	name, err := url.PathUnescape(defaultObject.Name)
	if err == nil {
		defaultObject.Name = name
	}

	*i = HiDriveObject(defaultObject)
	return nil
}

// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
	TotalCount int64           `json:"nmembers"`
	Entries    []HiDriveObject `json:"members"`
}

// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
	type directoryContentAlias DirectoryContent
	defaultDirectoryContent := directoryContentAlias{
		TotalCount: -1,
	}

	err := json.Unmarshal(data, &defaultDirectoryContent)
	if err != nil {
		return err
	}

	*d = DirectoryContent(defaultDirectoryContent)
	return nil
}
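
// Editor's note: a small sketch of the default-value behaviour above (JSON
// payload is illustrative). Fields absent from the response keep the
// sentinel -1 instead of Go's zero value, so "unknown" stays distinguishable
// from a real 0:
//
//	var obj api.HiDriveObject
//	_ = json.Unmarshal([]byte(`{"name":"a.txt","type":"file"}`), &obj)
//	// obj.Size == -1 && obj.MemberCount == -1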
@@ -1,888 +0,0 @@
package hidrive

// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package

// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).

import (
	"bytes"
	"context"
	"errors"
	"io"
	"net/http"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/hidrive/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/ranges"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

const (
	// MaximumUploadBytes represents the maximum amount of bytes
	// a single upload-operation will support.
	MaximumUploadBytes = 2147483647 // = 2GiB - 1
	// iterationChunkSize represents the chunk size used to iterate directory contents.
	iterationChunkSize = 5000
)

var (
	// retryErrorCodes is a slice of error codes that we will always retry.
	retryErrorCodes = []int{
		429, // Too Many Requests
		500, // Internal Server Error
		502, // Bad Gateway
		503, // Service Unavailable
		504, // Gateway Timeout
		509, // Bandwidth Limit Exceeded
	}
	// ErrorFileExists is returned when a query tries to create a file
	// that already exists.
	ErrorFileExists = errors.New("destination file already exists")
)

// MemberType represents the possible types of entries a directory can contain.
type MemberType string

// possible values for MemberType
const (
	AllMembers       MemberType = "all"
	NoMembers        MemberType = "none"
	DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
	FileMembers      MemberType = api.HiDriveObjectTypeFile
	SymlinkMembers   MemberType = api.HiDriveObjectTypeSymlink
)

// SortByField represents possible fields to sort entries of a directory by.
type SortByField string

// possible values for SortByField
const (
	descendingSort string = "-"
	SortByName                 SortByField = "name"
	SortByModTime              SortByField = "mtime"
	SortByObjectType           SortByField = "type"
	SortBySize                 SortByField = "size"
	SortByNameDescending       SortByField = SortByField(descendingSort) + SortByName
	SortByModTimeDescending    SortByField = SortByField(descendingSort) + SortByModTime
	SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
	SortBySizeDescending       SortByField = SortByField(descendingSort) + SortBySize
)

var (
	// Unsorted disables sorting and can therefore not be combined with other values.
	Unsorted = []SortByField{"none"}
	// DefaultSorted does not specify how to sort and
	// therefore implies the default sort order.
	DefaultSorted = []SortByField{}
)

// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
type CopyOrMoveOperationType int

// possible values for CopyOrMoveOperationType
const (
	MoveOriginal CopyOrMoveOperationType = iota
	CopyOriginal
	CopyOriginalPreserveModTime
)

// OnExistAction represents possible actions the API should take
// when a request tries to create a path that already exists.
type OnExistAction string

// possible values for OnExistAction
const (
	// IgnoreOnExist instructs the API not to execute
	// the request in case of a conflict, but to return an error.
	IgnoreOnExist OnExistAction = "ignore"
	// AutoNameOnExist instructs the API to automatically rename
	// any conflicting request-objects.
	AutoNameOnExist OnExistAction = "autoname"
	// OverwriteOnExist instructs the API to overwrite any conflicting files.
	// This can only be used if the request operates on files directly.
	// (For example when moving/copying a file.)
	// For most requests this action will simply be ignored.
	OverwriteOnExist OnExistAction = "overwrite"
)

// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
// It tries to expire/invalidate the token, if necessary.
// It returns the err as a convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
		fs.Debugf(f, "Token might be invalid: %v", err)
		if f.tokenRenewer != nil {
			iErr := f.tokenRenewer.Expire()
			if iErr == nil {
				return true, err
			}
		}
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// resolvePath resolves the given (relative) path and
// returns a path suitable for API-calls.
// This will consider the root-path of the fs and any needed prefixes.
//
// Any relative paths passed to functions that access these paths should
// be resolved with this first!
func (f *Fs) resolvePath(objectPath string) string {
	resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
	return resolved
}
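
// Editor's note: a standalone sketch of the same joining logic using only the
// standard library (prefix, root and object path are illustrative; the real
// method additionally applies the configured name encoding):
//
//	resolved := path.Join("root/user", "backups", "docs/a.txt")
//	// resolved == "root/user/backups/docs/a.txt"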

// iterateOverDirectory calls the given function callback
// on each item found in a given directory.
//
// If callback ever returns true then this exits early with found = true.
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	parameters.AddFields("members.", fields...)
	parameters.AddFields("", api.DirectoryContentFields...)
	parameters.Set("members", string(searchOnly))
	for _, v := range sortBy {
		// The explicit conversion is necessary for each element.
		parameters.AddList("sort", ",", string(v))
	}

	opts := rest.Opts{
		Method:     "GET",
		Path:       "/dir",
		Parameters: parameters.Values,
	}

	iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
		if err != nil {
			return false, err
		}
		for _, item := range result.Entries {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if callback(&item) {
				return true, nil
			}
		}
		return false, nil
	}
	return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
}

// paginateDirectoryAccess executes requests specified via ctx and opts
// which should produce api.DirectoryContent.
// This will paginate the requests using limit starting at the given offset.
//
// The given function callback is called on each api.DirectoryContent found
// along with any errors that occurred.
// If callback ever returns true then this exits early with found = true.
// If callback ever returns an error then this exits early with that error.
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
	for {
		opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))

		var result api.DirectoryContent
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
			return f.shouldRetry(ctx, resp, err)
		})

		found, err = callback(&result, err)
		if found || err != nil {
			return found, err
		}

		offset += int64(len(result.Entries))
		if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
			break
		}
	}
	return false, nil
}
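
// Editor's note: a hypothetical callback sketch for paginateDirectoryAccess
// (it merely counts entries; names are illustrative). Returning false keeps
// the pagination loop running until no further members are reported:
//
//	count := 0
//	countEntries := func(result *api.DirectoryContent, err error) (bool, error) {
//		if err != nil {
//			return false, err
//		}
//		count += len(result.Entries)
//		return false, nil
//	}
//	_, err := f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, countEntries)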

// fetchMetadataForPath reads the metadata from the path.
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.AddFields("", fields...)

	opts := rest.Opts{
		Method:     "GET",
		Path:       "/meta",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// copyOrMove copies or moves a directory or file
// from the source-path to the destination-path.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: Use the explicit methods instead of directly invoking this method.
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.Set("src", source)
	parameters.Set("dst", destination)
	if onExist == AutoNameOnExist ||
		(onExist == OverwriteOnExist && !isDirectory) {
		parameters.Set("on_exist", string(onExist))
	}

	endpoint := "/"
	if isDirectory {
		endpoint += "dir"
	} else {
		endpoint += "file"
	}
	switch operationType {
	case MoveOriginal:
		endpoint += "/move"
	case CopyOriginalPreserveModTime:
		parameters.Set("preserve_mtime", strconv.FormatBool(true))
		fallthrough
	case CopyOriginal:
		endpoint += "/copy"
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       endpoint,
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// copyDirectory copies the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}

// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
}

// copyFile copies the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation will expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
}

// moveFile moves the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation may expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
	return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
}

// createDirectory creates the directory at the given path and
// returns the resulting api-object if successful.
//
// The directory will only be created if its parent-directory exists.
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
// This returns fs.ErrorDirExists if the directory already exists.
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	if onExist == AutoNameOnExist {
		parameters.Set("on_exist", string(onExist))
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       "/dir",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return nil, fs.ErrorDirExists
	}
	return nil, err
}

// createDirectories creates the directory at the given path
// along with any missing parent directories and
// returns the resulting api-object (of the created directory) if successful.
//
// This returns fs.ErrorDirExists if the directory already exists.
//
// If an error occurs while the parent directories are being created,
// any directories already created will NOT be deleted again.
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
	result, err := f.createDirectory(ctx, directory, onExist)
	if err == nil {
		return result, nil
	}
	if err != fs.ErrorDirNotFound {
		return nil, err
	}
	parentDirectory := path.Dir(directory)
	_, err = f.createDirectories(ctx, parentDirectory, onExist)
	if err != nil && err != fs.ErrorDirExists {
		return nil, err
	}
	// NOTE: Ignoring fs.ErrorDirExists does no harm,
	// since it does not mean the child directory cannot be created.
	return f.createDirectory(ctx, directory, onExist)
}

// deleteDirectory deletes the directory at the given path.
//
// If recursive is false, the directory will only be deleted if it is empty.
// If recursive is true, the directory will be deleted regardless of its content.
// This returns fs.ErrorDirNotFound if the directory is not found.
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
// recursive is false.
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(directory)
	parameters.Set("recursive", strconv.FormatBool(recursive))

	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/dir",
		Parameters: parameters.Values,
		NoResponse: true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case isHTTPError(err, 404):
		return fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return fs.ErrorDirectoryNotEmpty
	}
	return err
}

// deleteObject deletes the object/file at the given path.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) deleteObject(ctx context.Context, path string) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)

	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/file",
		Parameters: parameters.Values,
		NoResponse: true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(ctx, resp, err)
	})

	if isHTTPError(err, 404) {
		return fs.ErrorObjectNotFound
	}
	return err
}

// createFile creates a file at the given path
// with the content of the io.ReadSeeker.
// This guarantees that existing files will not be overwritten.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
// This returns ErrorFileExists if a file already exists at the specified path.
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetFileInDirectory(path)
	if onExist == AutoNameOnExist {
		parameters.Set("on_exist", string(onExist))
	}

	var err error
	if !modTime.IsZero() {
		err = parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:      "POST",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		if _, err = content.Seek(0, io.SeekStart); err != nil {
			return false, err
		}
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	case isHTTPError(err, 409):
		return nil, ErrorFileExists
	}
	return nil, err
}

// overwriteFile updates the content of the file at the given path
// with the content of the io.ReadSeeker.
// If the file does not exist it will be created.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetFileInDirectory(path)

	var err error
	if !modTime.IsZero() {
		err = parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:      "PUT",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		if _, err = content.Seek(0, io.SeekStart); err != nil {
			return false, err
		}
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorDirNotFound
	}
	return nil, err
}
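
// Editor's note: a hypothetical call sketch for createFile (path, content and
// conflict policy are illustrative): create a small file with an explicit
// modification time and fail if it already exists:
//
//	content := bytes.NewReader([]byte("hello"))
//	info, err := f.createFile(ctx, "path/to/file.txt", content, time.Now(), IgnoreOnExist)
//	if err == ErrorFileExists {
//		// A conflicting file exists; pick another name or overwrite explicitly.
//	}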

// uploadFileChunked updates the content of the existing file at the given path
// with the content of the io.Reader.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
// Returns the resulting api-object if successful.
//
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: This method uses updateFileChunked and may create sparse files,
// if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
	okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)

	if err == nil {
		info, err = f.resizeFile(ctx, path, okSize, modTime)
	}
	return okSize, info, err
}

// updateFileChunked updates the content of the existing file at the given path
// starting at the given offset.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.Reader.
// If the offset is beyond the file end, the file is extended up to the offset.
//
// The upload is done in multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
//
// NOTE: Because it is inefficient to set the modification time with every chunk,
// setting it to a specific value must be done in a separate request
// after this operation finishes.
//
// NOTE: This method uses patchFile and may create sparse files,
// especially if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
	var (
		okChunksMu sync.Mutex // protects the variables below
		okChunks   []ranges.Range
	)
	g, gCtx := errgroup.WithContext(ctx)
	transferSemaphore := semaphore.NewWeighted(transferLimit)

	var readErr error
	startMoreTransfers := true
	zeroTime := time.Time{}
	for chunk := uint64(0); startMoreTransfers; chunk++ {
		// Acquire semaphore to limit number of transfers in parallel.
		readErr = transferSemaphore.Acquire(gCtx, 1)
		if readErr != nil {
			break
		}

		// Read a chunk of data.
		// (Assign to the outer readErr so read errors are not lost
		// to shadowing when they are checked after the loop.)
		var chunkReader io.Reader
		var bytesRead int
		chunkReader, bytesRead, readErr = readerForChunk(content, chunkSize)
		if bytesRead < chunkSize {
			startMoreTransfers = false
		}
		if readErr != nil || bytesRead <= 0 {
			break
		}

		// Transfer the chunk.
		chunkOffset := uint64(chunkSize)*chunk + offset
		g.Go(func() error {
			// After this upload is done,
			// signal that another transfer can be started.
			defer transferSemaphore.Release(1)
			uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
			if uploadErr == nil {
				// Remember successfully written chunks.
				okChunksMu.Lock()
				okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
				okChunksMu.Unlock()
				fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
			} else {
				fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
			}
			return uploadErr
		})
	}

	if readErr != nil {
		// Log the error in case it is later ignored because of an upload-error.
		fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
	}

	err = g.Wait()

	// Compute the first continuous range of the file content,
	// which does not contain any failed chunks.
	// Do not forget to add the file content up to the starting offset,
	// which is presumed to be already correct.
	rs := ranges.Ranges{}
	rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
	for _, chunkRange := range okChunks {
		rs.Insert(chunkRange)
	}
	if len(rs) > 0 && rs[0].Pos == 0 {
		okSize = uint64(rs[0].Size)
	}

	if err != nil {
		return okSize, err
	}
	if readErr != nil {
		return okSize, readErr
	}

	return okSize, nil
}
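
// Editor's note: a minimal sketch of the contiguous-prefix computation above
// using lib/ranges (offsets illustrative). Adjacent successful chunks merge
// into one range starting at 0, so okSize covers both; a failed chunk leaves
// a gap that stops the prefix:
//
//	rs := ranges.Ranges{}
//	rs.Insert(ranges.Range{Pos: 0, Size: 5})
//	rs.Insert(ranges.Range{Pos: 5, Size: 5})
//	// rs[0] == ranges.Range{Pos: 0, Size: 10}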

// patchFile updates the content of the existing file at the given path
// starting at the given offset.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.ReadSeeker.
// If the offset is beyond the file end, the file is extended up to the offset.
// The maximum size of the update is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file up to the offset this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.Set("offset", strconv.FormatUint(offset, 10))

	if !modTime.IsZero() {
		err := parameters.SetTime("mtime", modTime)
		if err != nil {
			return err
		}
	}

	opts := rest.Opts{
		Method:      "PATCH",
		Path:        "/file",
		Body:        content,
		ContentType: "application/octet-stream",
		Parameters:  parameters.Values,
		NoResponse:  true,
	}

	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		// Reset the reading index (in case this is a retry).
		_, err = content.Seek(0, io.SeekStart)
		if err != nil {
			return false, err
		}
		resp, err = f.srv.Call(ctx, &opts)
		if isHTTPError(err, 423) {
			return true, err
		}
		return f.shouldRetry(ctx, resp, err)
	})

	if isHTTPError(err, 404) {
		return fs.ErrorObjectNotFound
	}
	return err
}

// resizeFile updates the existing file at the given path to be of the given size
// and returns the resulting api-object if successful.
//
// If the given size is smaller than the current filesize,
// the file is cut/truncated at that position.
// If the given size is larger, the file is extended up to that position.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
	parameters := api.NewQueryParameters()
	parameters.SetPath(path)
	parameters.Set("size", strconv.FormatUint(size, 10))

	if !modTime.IsZero() {
		err := parameters.SetTime("mtime", modTime)
		if err != nil {
			return nil, err
		}
	}

	opts := rest.Opts{
		Method:     "POST",
		Path:       "/file/truncate",
		Parameters: parameters.Values,
	}

	var result api.HiDriveObject
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})

	switch {
	case err == nil:
		return &result, nil
	case isHTTPError(err, 404):
		return nil, fs.ErrorObjectNotFound
	}
	return nil, err
}

// ------------------------------------------------------------

// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
	if apiErr, ok := err.(*api.Error); ok {
		errStatus, decodeErr := apiErr.Code.Int64()
		if decodeErr == nil && errStatus == status {
			return true
		}
	}
	return false
}

// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
	switch {
	case role != "" && access != "":
		return []string{access + "," + role}
	case role != "":
		return []string{role}
	case access != "":
		return []string{access}
	}
	return []string{}
}
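
// Editor's note: illustrative outputs of createHiDriveScopes (role and access
// values are examples):
//
//	createHiDriveScopes("user", "rw") // []string{"rw,user"}
//	createHiDriveScopes("user", "")   // []string{"user"}
//	createHiDriveScopes("", "ro")     // []string{"ro"}
//	createHiDriveScopes("", "")       // []string{}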

// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker {
	bytesReader, ok := reader.(*bytes.Reader)
	if ok {
		return bytesReader
	}

	repeatableReader, ok := reader.(*readers.RepeatableReader)
	if ok {
		return repeatableReader
	}

	return readers.NewRepeatableReader(reader)
}

// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
// Returns a new io.Reader (chunkReader) for that chunk
// and the number of bytes that have been read from reader.
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
	// Unwrap any accounting from the input if present.
	reader, wrap := accounting.UnWrap(reader)

	// Read a chunk of data.
	buffer := make([]byte, length)
	bytesRead, err = io.ReadFull(reader, buffer)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	if err != nil {
		return nil, bytesRead, err
	}
	// Truncate unused capacity.
	buffer = buffer[:bytesRead]

	// Use wrap to put any accounting back for chunkReader.
	return wrap(bytes.NewReader(buffer)), bytesRead, nil
}
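
// Editor's note: a hypothetical usage sketch for readerForChunk (input data
// is illustrative): split an in-memory reader into 4-byte chunks:
//
//	r := bytes.NewReader([]byte("abcdefgh"))
//	chunk, n, err := readerForChunk(r, 4)
//	// n == 4 and chunk yields "abcd"; the next call yields "efgh".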
File diff suppressed because it is too large
@@ -1,45 +0,0 @@
// Test HiDrive filesystem interface
package hidrive

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
	name := "TestHiDrive"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize:       1,
			MaxChunkSize:       MaximumUploadBytes,
			CeilChunkSize:      nil,
			NeedMultipleChunks: false,
		},
	})
}

// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
	var old fs.SizeSuffix
	old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
	return old, nil
}

// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
	var old fs.SizeSuffix
	old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
	return old, nil
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
@@ -1,410 +0,0 @@
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash

import (
	"bytes"
	"crypto/sha1"
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io"

	"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)

const (
	// BlockSize of the checksum in bytes.
	BlockSize = 4096
	// Size of the checksum in bytes.
	Size = sha1.Size
	// sumsPerLevel is the maximum number of checksums one level can aggregate.
	sumsPerLevel = 256
)

var (
	// zeroSum is a special hash consisting of 20 null-bytes.
	// This will be the hash of any empty file (or ones containing only null-bytes).
	zeroSum = [Size]byte{}
	// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
	ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
	// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
	ErrorHashFull = errors.New("hash reached its capacity")
)

// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer bytesInBlock to a counter needs to be supplied,
// that is used to keep track of how many bytes have been written to the writer already.
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
// that is used to keep track of whether the block so far only consists of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
	total := len(p)
	nullBytes := make([]byte, blockSize)
	for len(p) > 0 {
		toWrite := int(blockSize - *bytesInBlock)
		if toWrite > len(p) {
			toWrite = len(p)
		}
		c, err := writer.Write(p[:toWrite])
		*bytesInBlock += uint32(c)
		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
		// Discard data written through a reslice.
		p = p[c:]
		if err != nil {
			return total - len(p), err
		}
		if *bytesInBlock == blockSize {
			err = onBlockWritten(len(p))
			if err != nil {
				return total - len(p), err
			}
			*bytesInBlock = 0
			*onlyNullBytesInBlock = true
		}
	}
	return total, nil
}

// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
	checksum              [Size]byte // aggregated checksum of this level
	sumCount              uint32     // number of sums contained in this level so far
	bytesInHasher         uint32     // number of bytes written into hasher so far
	onlyNullBytesInHasher bool       // whether the hasher only contains null-bytes so far
	hasher                hash.Hash
}

// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
	l := &level{}
	l.Reset()
	return l
}

// Add takes a position-embedded SHA-1 checksum and adds it to the level.
func (l *level) Add(sha1sum []byte) {
	var tmp uint
	var carry bool
	for i := Size - 1; i >= 0; i-- {
		tmp = uint(sha1sum[i]) + uint(l.checksum[i])
		if carry {
			tmp++
		}
		carry = tmp > 255
		l.checksum[i] = byte(tmp)
	}
}

// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
	return l.sumCount >= sumsPerLevel
}
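
// Editor's note: Add above performs byte-wise big-endian addition with carry,
// i.e. the 20-byte checksums are summed modulo 2^160. A worked sketch with
// illustrative 2-byte values: 0x00FF + 0x0001 carries into the higher byte
// and yields 0x0100; a carry out of the most significant byte is discarded.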

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
	if l.IsFull() {
		return 0, ErrorHashFull
	}
	onBlockWritten := func(remaining int) error {
		if !l.onlyNullBytesInHasher {
			c, err := l.hasher.Write([]byte{byte(l.sumCount)})
			l.bytesInHasher += uint32(c)
			if err != nil {
				return err
			}
			l.Add(l.hasher.Sum(nil))
		}
		l.sumCount++
		l.hasher.Reset()
		if remaining > 0 && l.IsFull() {
			return ErrorHashFull
		}
		return nil
	}
	return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
	return append(b, l.checksum[:]...)
}

// Reset resets the Hash to its initial state.
func (l *level) Reset() {
	l.checksum = zeroSum // clear the current checksum
	l.sumCount = 0
	l.bytesInHasher = 0
	l.onlyNullBytesInHasher = true
	l.hasher = sha1.New()
}

// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
	return Size
}

// MarshalBinary encodes the hash into a binary form and returns the result.
func (l *level) MarshalBinary() ([]byte, error) {
	b := make([]byte, Size+4+4+1)
	copy(b, l.checksum[:])
	binary.BigEndian.PutUint32(b[Size:], l.sumCount)
	binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
	if l.onlyNullBytesInHasher {
		b[Size+4+4] = 1
	}
	encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		return nil, err
	}
	b = append(b, encodedHasher...)
	return b, nil
}

// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
	if len(b) < Size+4+4+1 {
		return ErrorInvalidEncoding
	}
	copy(l.checksum[:], b)
	l.sumCount = binary.BigEndian.Uint32(b[Size:])
	l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
	switch b[Size+4+4] {
	case 0:
		l.onlyNullBytesInHasher = false
	case 1:
		l.onlyNullBytesInHasher = true
	default:
		return ErrorInvalidEncoding
	}
	err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
	return err
}
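
// Editor's note: the binary layout produced by MarshalBinary above, read off
// directly from the offsets in the code (sizes in bytes):
//
//	[0:20]  checksum
//	[20:24] sumCount (big-endian uint32)
//	[24:28] bytesInHasher (big-endian uint32)
//	[28]    onlyNullBytesInHasher flag (0 or 1)
//	[29:]   marshaled state of the internal SHA-1 hasher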

// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
	levels               []*level   // collection of level-hashes, one for each level starting at level-1
	lastSumWritten       [Size]byte // the last checksum written to any of the levels
	bytesInBlock         uint32     // bytes written into blockHash so far
	onlyNullBytesInBlock bool       // whether the hasher only contains null-bytes so far
	blockHash            hash.Hash
}

// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
	h := &hidriveHash{}
	h.Reset()
	return h
}

// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
	for i := index; ; i++ {
		if i >= len(h.levels) {
			h.levels = append(h.levels, NewLevel().(*level))
		}
		_, err := h.levels[i].Write(sum)
		copy(h.lastSumWritten[:], sum)
		if err != nil {
			panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
		}
		if !h.levels[i].IsFull() {
			break
		}
		sum = h.levels[i].Sum(nil)
		h.levels[i].Reset()
	}
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
	onBlockWritten := func(remaining int) error {
		var sum []byte
		if h.onlyNullBytesInBlock {
			sum = zeroSum[:]
		} else {
			sum = h.blockHash.Sum(nil)
		}
		h.blockHash.Reset()
		h.aggregateToLevel(0, sum)
		return nil
	}
	return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
	// Save internal state.
	state, err := h.MarshalBinary()
	if err != nil {
		panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
	}

	if h.bytesInBlock > 0 {
		// Fill remainder of block with null-bytes.
		filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
		_, err = h.Write(filler)
		if err != nil {
			panic(fmt.Errorf("filling with null-bytes should not have produced an error: %w", err))
		}
	}

	checksum := zeroSum
	for i := 0; i < len(h.levels); i++ {
		level := h.levels[i]
		if i < len(h.levels)-1 {
			// Aggregate non-empty non-final levels.
			if level.sumCount >= 1 {
				h.aggregateToLevel(i+1, level.Sum(nil))
				level.Reset()
			}
		} else {
			// Determine sum of final level.
			if level.sumCount > 1 {
				copy(checksum[:], level.Sum(nil))
			} else {
				// This is needed, otherwise there is no way to return
				// the non-position-embedded checksum.
				checksum = h.lastSumWritten
			}
		}
	}

	// Restore internal state.
	err = h.UnmarshalBinary(state)
	if err != nil {
		panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
	}

	return append(b, checksum[:]...)
}
|
|
||||||
// Reset resets the Hash to its initial state.
|
|
||||||
func (h *hidriveHash) Reset() {
|
|
||||||
h.levels = nil
|
|
||||||
h.lastSumWritten = zeroSum // clear the last written checksum
|
|
||||||
h.bytesInBlock = 0
|
|
||||||
h.onlyNullBytesInBlock = true
|
|
||||||
h.blockHash = sha1.New()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the number of bytes Sum will return.
|
|
||||||
func (h *hidriveHash) Size() int {
|
|
||||||
return Size
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlockSize returns the hash's underlying block size.
|
|
||||||
// The Write method must be able to accept any amount
|
|
||||||
// of data, but it may operate more efficiently if all writes
|
|
||||||
// are a multiple of the block size.
|
|
||||||
func (h *hidriveHash) BlockSize() int {
|
|
||||||
return BlockSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary encodes the hash into a binary form and returns the result.
|
|
||||||
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
|
|
||||||
b := make([]byte, Size+4+1+8)
|
|
||||||
copy(b, h.lastSumWritten[:])
|
|
||||||
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
|
|
||||||
if h.onlyNullBytesInBlock {
|
|
||||||
b[Size+4] = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
|
|
||||||
for _, level := range h.levels {
|
|
||||||
encodedLevel, err := level.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
encodedLength := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
|
|
||||||
b = append(b, encodedLength...)
|
|
||||||
b = append(b, encodedLevel...)
|
|
||||||
}
|
|
||||||
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
b = append(b, encodedBlockHash...)
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
|
||||||
// The hash will replace its internal state accordingly.
|
|
||||||
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
|
|
||||||
if len(b) < Size+4+1+8 {
|
|
||||||
return ErrorInvalidEncoding
|
|
||||||
}
|
|
||||||
copy(h.lastSumWritten[:], b)
|
|
||||||
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
|
|
||||||
switch b[Size+4] {
|
|
||||||
case 0:
|
|
||||||
h.onlyNullBytesInBlock = false
|
|
||||||
case 1:
|
|
||||||
h.onlyNullBytesInBlock = true
|
|
||||||
default:
|
|
||||||
return ErrorInvalidEncoding
|
|
||||||
}
|
|
||||||
|
|
||||||
amount := binary.BigEndian.Uint64(b[Size+4+1:])
|
|
||||||
h.levels = make([]*level, int(amount))
|
|
||||||
offset := Size + 4 + 1 + 8
|
|
||||||
for i := range h.levels {
|
|
||||||
length := int(binary.BigEndian.Uint64(b[offset:]))
|
|
||||||
offset += 8
|
|
||||||
h.levels[i] = NewLevel().(*level)
|
|
||||||
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
offset += length
|
|
||||||
}
|
|
||||||
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum returns the HiDrive checksum of the data.
|
|
||||||
func Sum(data []byte) [Size]byte {
|
|
||||||
h := New().(*hidriveHash)
|
|
||||||
_, _ = h.Write(data)
|
|
||||||
var result [Size]byte
|
|
||||||
copy(result[:], h.Sum(nil))
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied.
|
|
||||||
var (
|
|
||||||
_ hash.Hash = (*level)(nil)
|
|
||||||
_ encoding.BinaryMarshaler = (*level)(nil)
|
|
||||||
_ encoding.BinaryUnmarshaler = (*level)(nil)
|
|
||||||
_ internal.LevelHash = (*level)(nil)
|
|
||||||
_ hash.Hash = (*hidriveHash)(nil)
|
|
||||||
_ encoding.BinaryMarshaler = (*hidriveHash)(nil)
|
|
||||||
_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
|
|
||||||
)
|
|
||||||
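The type above satisfies hash.Hash, so it can be driven like any other Go hash. A minimal usage sketch (not part of the diff; it assumes only the exported API shown above and the import path used by the tests below):

package main

import (
    "encoding/hex"
    "fmt"

    "github.com/rclone/rclone/backend/hidrive/hidrivehash"
)

func main() {
    data := []byte("hello rclone\n")

    // One-shot helper.
    sum := hidrivehash.Sum(data)
    fmt.Println(hex.EncodeToString(sum[:]))

    // Streaming: Sum saves and restores the internal state, so it can be
    // called mid-stream to inspect the checksum of the data written so far.
    h := hidrivehash.New()
    _, _ = h.Write(data)
    fmt.Println(hex.EncodeToString(h.Sum(nil)))
}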
@@ -1,395 +0,0 @@
package hidrivehash_test

import (
    "crypto/sha1"
    "encoding"
    "encoding/hex"
    "fmt"
    "io"
    "testing"

    "github.com/rclone/rclone/backend/hidrive/hidrivehash"
    "github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
    "github.com/stretchr/testify/assert"
)

// helper functions to set up test-tables

func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
    return sum[:]
}

func mustDecode(hexstring string) []byte {
    result, err := hex.DecodeString(hexstring)
    if err != nil {
        panic(err)
    }
    return result
}

// ------------------------------------------------------------

var testTableLevelPositionEmbedded = []struct {
    ins  [][]byte
    outs [][]byte
    name string
}{
    {
        [][]byte{
            sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
            sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
        },
        [][]byte{
            sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
            sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
        },
        "documentation-v3.2rev27-example L0 (position-embedded)",
    },
    {
        [][]byte{
            sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
            sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
            sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
        },
        [][]byte{
            sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
            sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
            sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
        },
        "documentation-example L0 (position-embedded)",
    },
    {
        [][]byte{
            sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
            sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
            sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
        },
        [][]byte{
            sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
            sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
            sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
        },
        "documentation-example L1 (position-embedded)",
    },
}

var testTableLevel = []struct {
    ins  [][]byte
    outs [][]byte
    name string
}{
    {
        [][]byte{
            mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
            mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
            mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
        },
        [][]byte{
            mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
            mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
            mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
        },
        "documentation-example L0",
    },
    {
        [][]byte{
            mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
            mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
            mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
        },
        [][]byte{
            mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
            mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
            mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
        },
        "documentation-example L1",
    },
    {
        [][]byte{
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
            mustDecode("0000000000000000000000000000000000000000"),
        },
        [][]byte{
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("0000000000000000000000000000000000000000"),
            mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
            mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
            mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
            mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
            mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
            mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
            mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
            mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
        },
        "mixed-with-empties",
    },
}

var testTable = []struct {
    data []byte
    // pattern describes how to use data to construct the hash-input.
    // For every entry n at even indices this repeats the data n times.
    // For every entry m at odd indices this repeats a null-byte m times.
    // The input-data is constructed by concatenating the results in order.
    pattern []int64
    out     []byte
    name    string
}{
    {
        []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
        []int64{64},
        mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
        "documentation-example L0",
    },
    {
        []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
        []int64{64 * 256},
        mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
        "documentation-example L1",
    },
    {
        []byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
        []int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
        mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
        "documentation-example L2",
    },
    {
        []byte("hello rclone\n"),
        []int64{316},
        mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
        "not-block-aligned",
    },
    {
        []byte("hello rclone\n"),
        []int64{13, 4096 * 3, 4},
        mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
        "not-block-aligned-with-null-bytes",
    },
    {
        []byte{},
        []int64{},
        mustDecode("0000000000000000000000000000000000000000"),
        "empty",
    },
    {
        []byte{},
        []int64{0, 4096 * 256 * 256},
        mustDecode("0000000000000000000000000000000000000000"),
        "null-bytes",
    },
}
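The pattern field in the table above is a compact encoding of the hash input. A throwaway sketch of the expansion it describes (expandPattern is a hypothetical helper, not in the test file; the tests themselves stream the same expansion through writeInChunks further down):

package main

import (
    "bytes"
    "fmt"
)

// expandPattern materializes a testTable pattern eagerly: entries at even
// indices repeat data n times, entries at odd indices insert n null-bytes,
// and the pieces are concatenated in order.
func expandPattern(data []byte, pattern []int64) []byte {
    var out []byte
    for i, n := range pattern {
        if i%2 == 0 {
            out = append(out, bytes.Repeat(data, int(n))...)
        } else {
            out = append(out, make([]byte, n)...)
        }
    }
    return out
}

func main() {
    fmt.Printf("%q\n", expandPattern([]byte("ab"), []int64{2, 3, 1})) // "abab\x00\x00\x00ab"
}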

// ------------------------------------------------------------

func TestLevelAdd(t *testing.T) {
    for _, test := range testTableLevelPositionEmbedded {
        l := hidrivehash.NewLevel().(internal.LevelHash)
        t.Run(test.name, func(t *testing.T) {
            for i := range test.ins {
                l.Add(test.ins[i])
                assert.Equal(t, test.outs[i], l.Sum(nil))
            }
        })
    }
}

func TestLevelWrite(t *testing.T) {
    for _, test := range testTableLevel {
        l := hidrivehash.NewLevel()
        t.Run(test.name, func(t *testing.T) {
            for i := range test.ins {
                l.Write(test.ins[i])
                assert.Equal(t, test.outs[i], l.Sum(nil))
            }
        })
    }
}

func TestLevelIsFull(t *testing.T) {
    content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
    l := hidrivehash.NewLevel()
    for i := 0; i < 256; i++ {
        assert.False(t, l.(internal.LevelHash).IsFull())
        written, err := l.Write(content[:])
        assert.Equal(t, len(content), written)
        if !assert.NoError(t, err) {
            t.FailNow()
        }
    }
    assert.True(t, l.(internal.LevelHash).IsFull())
    written, err := l.Write(content[:])
    assert.True(t, l.(internal.LevelHash).IsFull())
    assert.Equal(t, 0, written)
    assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
}

func TestLevelReset(t *testing.T) {
    l := hidrivehash.NewLevel()
    zeroHash := l.Sum(nil)
    _, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
    if assert.NoError(t, err) {
        assert.NotEqual(t, zeroHash, l.Sum(nil))
        l.Reset()
        assert.Equal(t, zeroHash, l.Sum(nil))
    }
}

func TestLevelSize(t *testing.T) {
    l := hidrivehash.NewLevel()
    assert.Equal(t, 20, l.Size())
}

func TestLevelBlockSize(t *testing.T) {
    l := hidrivehash.NewLevel()
    assert.Equal(t, 20, l.BlockSize())
}

func TestLevelBinaryMarshaler(t *testing.T) {
    content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
    l := hidrivehash.NewLevel().(internal.LevelHash)
    l.Write(content[:10])
    encoded, err := l.MarshalBinary()
    if assert.NoError(t, err) {
        d := hidrivehash.NewLevel().(internal.LevelHash)
        err = d.UnmarshalBinary(encoded)
        if assert.NoError(t, err) {
            assert.Equal(t, l.Sum(nil), d.Sum(nil))
            l.Write(content[10:])
            d.Write(content[10:])
            assert.Equal(t, l.Sum(nil), d.Sum(nil))
        }
    }
}

func TestLevelInvalidEncoding(t *testing.T) {
    l := hidrivehash.NewLevel().(internal.LevelHash)
    err := l.UnmarshalBinary([]byte{})
    assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}

// ------------------------------------------------------------

type infiniteReader struct {
    source []byte
    offset int
}

func (m *infiniteReader) Read(b []byte) (int, error) {
    count := copy(b, m.source[m.offset:])
    m.offset += count
    m.offset %= len(m.source)
    return count, nil
}

func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
    readers := make([]io.Reader, len(pattern))
    nullBytes := [4096]byte{}
    for i, n := range pattern {
        if i%2 == 0 {
            readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
        } else {
            readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
        }
    }
    reader := io.MultiReader(readers...)
    for {
        _, err := io.CopyN(writer, reader, chunkSize)
        if err != nil {
            if err == io.EOF {
                err = nil
            }
            return err
        }
    }
}

func TestWrite(t *testing.T) {
    for _, test := range testTable {
        t.Run(test.name, func(t *testing.T) {
            h := hidrivehash.New()
            err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
            if assert.NoError(t, err) {
                normalSum := h.Sum(nil)
                assert.Equal(t, test.out, normalSum)
                // Test if different block-sizes produce differing results.
                for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
                    t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
                        h := hidrivehash.New()
                        err := writeInChunks(h, blockSize, test.data, test.pattern)
                        if assert.NoError(t, err) {
                            assert.Equal(t, normalSum, h.Sum(nil))
                        }
                    })
                }
            }
        })
    }
}

func TestReset(t *testing.T) {
    h := hidrivehash.New()
    zeroHash := h.Sum(nil)
    _, err := h.Write([]byte{1})
    if assert.NoError(t, err) {
        assert.NotEqual(t, zeroHash, h.Sum(nil))
        h.Reset()
        assert.Equal(t, zeroHash, h.Sum(nil))
    }
}

func TestSize(t *testing.T) {
    h := hidrivehash.New()
    assert.Equal(t, 20, h.Size())
}

func TestBlockSize(t *testing.T) {
    h := hidrivehash.New()
    assert.Equal(t, 4096, h.BlockSize())
}

func TestBinaryMarshaler(t *testing.T) {
    for _, test := range testTable {
        h := hidrivehash.New()
        d := hidrivehash.New()
        half := len(test.pattern) / 2
        t.Run(test.name, func(t *testing.T) {
            err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
            assert.NoError(t, err)
            encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
            if assert.NoError(t, err) {
                err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
                if assert.NoError(t, err) {
                    assert.Equal(t, h.Sum(nil), d.Sum(nil))
                    err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
                    assert.NoError(t, err)
                    err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
                    assert.NoError(t, err)
                    assert.Equal(t, h.Sum(nil), d.Sum(nil))
                }
            }
        })
    }
}

func TestInvalidEncoding(t *testing.T) {
    h := hidrivehash.New()
    err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
    assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}

func TestSum(t *testing.T) {
    assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
    content := []byte{1}
    h := hidrivehash.New()
    h.Write(content)
    sum := hidrivehash.Sum(content)
    assert.Equal(t, h.Sum(nil), sum[:])
}
@@ -1,18 +0,0 @@
// Package internal provides utilities for HiDrive.
package internal

import (
    "encoding"
    "hash"
)

// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
    encoding.BinaryMarshaler
    encoding.BinaryUnmarshaler
    hash.Hash
    // Add takes a position-embedded checksum and adds it to the level.
    Add(sum []byte)
    // IsFull returns whether the number of checksums added to this level reached its capacity.
    IsFull() bool
}
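For reference, a brief sketch of how a level behaves when driven through this interface. It is a hypothetical standalone program: the local levelHash interface mirrors LevelHash because Go only allows this internal package to be imported from inside the rclone tree, and the 256-sum capacity is taken from TestLevelIsFull above.

package main

import (
    "fmt"
    "hash"

    "github.com/rclone/rclone/backend/hidrive/hidrivehash"
)

// levelHash mirrors internal.LevelHash; the concrete level type satisfies
// it structurally, so a plain type assertion works.
type levelHash interface {
    hash.Hash
    Add(sum []byte)
    IsFull() bool
}

func main() {
    l := hidrivehash.NewLevel().(levelHash)
    sum := make([]byte, l.BlockSize()) // a level consumes 20-byte SHA-1 sums

    // A level accepts 256 checksums before reporting full; hidriveHash's
    // aggregation then rolls the level's own sum up into the next level.
    for !l.IsFull() {
        if _, err := l.Write(sum); err != nil {
            panic(err)
        }
    }
    fmt.Printf("level full after 256 sums: %x\n", l.Sum(nil))
}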
@@ -35,11 +35,11 @@ var (
 func init() {
     fsi := &fs.RegInfo{
         Name:        "http",
-        Description: "HTTP",
+        Description: "http Connection",
         NewFs:       NewFs,
         Options: []fs.Option{{
             Name:     "url",
-            Help:     "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
+            Help:     "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
             Required: true,
         }, {
             Name: "headers",
62  backend/hubic/auth.go  Normal file
@@ -0,0 +1,62 @@
package hubic

import (
    "context"
    "net/http"
    "time"

    "github.com/ncw/swift/v2"
    "github.com/rclone/rclone/fs"
)

// auth is an authenticator for swift
type auth struct {
    f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
    return &auth{
        f: f,
    }
}

// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
    const retries = 10
    for try := 1; try <= retries; try++ {
        err = a.f.getCredentials(context.TODO())
        if err == nil {
            break
        }
        time.Sleep(100 * time.Millisecond)
        fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
    }
    return nil, err
}

// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
    return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
    return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
    return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
    return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
200  backend/hubic/hubic.go  Normal file
@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic

// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
    "time"

    swiftLib "github.com/ncw/swift/v2"
    "github.com/rclone/rclone/backend/swift"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/lib/oauthutil"
    "golang.org/x/oauth2"
)

const (
    rcloneClientID              = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
    rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)

// Globals
var (
    // Description of how to auth for this app
    oauthConfig = &oauth2.Config{
        Scopes: []string{
            "credentials.r", // Read OpenStack credentials
        },
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://api.hubic.com/oauth/auth/",
            TokenURL: "https://api.hubic.com/oauth/token/",
        },
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "hubic",
        Description: "Hubic",
        NewFs:       NewFs,
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
            return oauthutil.ConfigOut("", &oauthutil.Options{
                OAuth2Config: oauthConfig,
            })
        },
        Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
    })
}

// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
    Token    string `json:"token"`    // OpenStack token
    Endpoint string `json:"endpoint"` // OpenStack endpoint
    Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}

// Fs represents a remote hubic
type Fs struct {
    fs.Fs                    // wrapped Fs
    features    *fs.Features // optional features
    client      *http.Client // client for oauth api
    credentials credentials  // returned from the Hubic API
    expires     time.Time    // time credentials expire
}

// Object describes a swift object
type Object struct {
    *swift.Object
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Object.String()
}

// ------------------------------------------------------------

// String converts this Fs to a string
func (f *Fs) String() string {
    if f.Fs == nil {
        return "Hubic"
    }
    return fmt.Sprintf("Hubic %s", f.Fs.String())
}

// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
    req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
    if err != nil {
        return err
    }
    resp, err := f.client.Do(req)
    if err != nil {
        return err
    }
    defer fs.CheckClose(resp.Body, &err)
    if resp.StatusCode < 200 || resp.StatusCode > 299 {
        body, _ := ioutil.ReadAll(resp.Body)
        bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
        return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
    }
    decoder := json.NewDecoder(resp.Body)
    var result credentials
    err = decoder.Decode(&result)
    if err != nil {
        return err
    }
    // fs.Debugf(f, "Got credentials %+v", result)
    if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
        return errors.New("couldn't read token, result and expired from credentials")
    }
    f.credentials = result
    expires, err := time.Parse(time.RFC3339, result.Expires)
    if err != nil {
        return err
    }
    f.expires = expires
    fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
    return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
    if err != nil {
        return nil, fmt.Errorf("failed to configure Hubic: %w", err)
    }

    f := &Fs{
        client: client,
    }

    // Make the swift Connection
    ci := fs.GetConfig(ctx)
    c := &swiftLib.Connection{
        Auth:           newAuth(f),
        ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
        Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
        Transport:      fshttp.NewTransport(ctx),
    }
    err = c.Authenticate(ctx)
    if err != nil {
        return nil, fmt.Errorf("error authenticating swift connection: %w", err)
    }

    // Parse config into swift.Options struct
    opt := new(swift.Options)
    err = configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    // Make inner swift Fs from the connection
    swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
    if err != nil && err != fs.ErrorIsFile {
        return nil, err
    }
    f.Fs = swiftFs
    f.features = f.Fs.Features().Wrap(f)
    return f, err
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.Fs
}

// Check the interfaces are satisfied
var (
    _ fs.Fs        = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
)
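For illustration, a sketch of decoding the kind of JSON the credentials endpoint returns. The field shapes come from the credentials struct above; the concrete token and endpoint values are made up, and only the expires format is documented in the struct's comment.

package main

import (
    "encoding/json"
    "fmt"
)

// credentials mirrors the struct in hubic.go above.
type credentials struct {
    Token    string `json:"token"`
    Endpoint string `json:"endpoint"`
    Expires  string `json:"expires"`
}

func main() {
    // Hypothetical response body from /1.0/account/credentials.
    body := []byte(`{
        "token": "abcdef0123456789",
        "endpoint": "https://example.invalid/v1/AUTH_xxx",
        "expires": "2015-11-09T14:24:56+01:00"
    }`)
    var c credentials
    if err := json.Unmarshal(body, &c); err != nil {
        panic(err)
    }
    fmt.Printf("swift endpoint %s, token %s, expires %s\n", c.Endpoint, c.Token, c.Expires)
}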
19  backend/hubic/hubic_test.go  Normal file
@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test

import (
    "testing"

    "github.com/rclone/rclone/backend/hubic"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName:          "TestHubic:",
        NilObject:           (*hubic.Object)(nil),
        SkipFsCheckWrap:     true,
        SkipObjectCheckWrap: true,
    })
}
@@ -28,7 +28,6 @@ import (
     "github.com/rclone/rclone/lib/bucket"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/pacer"
-    "github.com/rclone/rclone/lib/random"
     "github.com/rclone/rclone/lib/rest"
 )
 
@@ -38,100 +37,6 @@ func init() {
         Name:        "internetarchive",
         Description: "Internet Archive",
         NewFs:       NewFs,
-
-        MetadataInfo: &fs.MetadataInfo{
-            System: map[string]fs.MetadataHelp{
-                "name": {
-                    Help:     "Full file path, without the bucket part",
-                    Type:     "filename",
-                    Example:  "backend/internetarchive/internetarchive.go",
-                    ReadOnly: true,
-                },
-                "source": {
-                    Help:     "The source of the file",
-                    Type:     "string",
-                    Example:  "original",
-                    ReadOnly: true,
-                },
-                "mtime": {
-                    Help:     "Time of last modification, managed by Rclone",
-                    Type:     "RFC 3339",
-                    Example:  "2006-01-02T15:04:05.999999999Z",
-                    ReadOnly: true,
-                },
-                "size": {
-                    Help:     "File size in bytes",
-                    Type:     "decimal number",
-                    Example:  "123456",
-                    ReadOnly: true,
-                },
-                "md5": {
-                    Help:     "MD5 hash calculated by Internet Archive",
-                    Type:     "string",
-                    Example:  "01234567012345670123456701234567",
-                    ReadOnly: true,
-                },
-                "crc32": {
-                    Help:     "CRC32 calculated by Internet Archive",
-                    Type:     "string",
-                    Example:  "01234567",
-                    ReadOnly: true,
-                },
-                "sha1": {
-                    Help:     "SHA1 hash calculated by Internet Archive",
-                    Type:     "string",
-                    Example:  "0123456701234567012345670123456701234567",
-                    ReadOnly: true,
-                },
-                "format": {
-                    Help:     "Name of format identified by Internet Archive",
-                    Type:     "string",
-                    Example:  "Comma-Separated Values",
-                    ReadOnly: true,
-                },
-                "old_version": {
-                    Help:     "Whether the file was replaced and moved by keep-old-version flag",
-                    Type:     "boolean",
-                    Example:  "true",
-                    ReadOnly: true,
-                },
-                "viruscheck": {
-                    Help:     "The last time viruscheck process was run for the file (?)",
-                    Type:     "unixtime",
-                    Example:  "1654191352",
-                    ReadOnly: true,
-                },
-                "summation": {
-                    Help:     "Check https://forum.rclone.org/t/31922 for how it is used",
-                    Type:     "string",
-                    Example:  "md5",
-                    ReadOnly: true,
-                },
-
-                "rclone-ia-mtime": {
-                    Help:    "Time of last modification, managed by Internet Archive",
-                    Type:    "RFC 3339",
-                    Example: "2006-01-02T15:04:05.999999999Z",
-                },
-                "rclone-mtime": {
-                    Help:    "Time of last modification, managed by Rclone",
-                    Type:    "RFC 3339",
-                    Example: "2006-01-02T15:04:05.999999999Z",
-                },
-                "rclone-update-track": {
-                    Help:    "Random value used by Rclone for tracking changes inside Internet Archive",
-                    Type:    "string",
-                    Example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-                },
-            },
-            Help: `Metadata fields provided by Internet Archive.
-If there are multiple values for a key, only the first one is returned.
-This is a limitation of Rclone, that supports one value per one key.
-
-Owner is able to add custom keys. Metadata feature grabs all the keys including them.
-`,
-        },
-
         Options: []fs.Option{{
             Name: "access_key_id",
             Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
@@ -184,14 +89,6 @@ Only enable if you need to be guaranteed to be reflected after write operations.
 // maximum size of an item. this is constant across all items
 const iaItemMaxSize int64 = 1099511627776
 
-// metadata keys that are not writeable
-var roMetadataKey = map[string]interface{}{
-    // do not add mtime here, it's a documented exception
-    "name": nil, "source": nil, "size": nil, "md5": nil,
-    "crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
-    "viruscheck": nil, "summation": nil,
-}
-
 // Options defines the configuration for this backend
 type Options struct {
     AccessKeyID string `config:"access_key_id"`
@@ -224,37 +121,26 @@ type Object struct {
     md5     string // md5 hash of the file presented by the server
     sha1    string // sha1 hash of the file presented by the server
     crc32   string // crc32 of the file presented by the server
-    rawData json.RawMessage
 }
 
-// IAFile represents a subset of object in MetadataResponse.Files
+// IAFile reprensents a subset of object in MetadataResponse.Files
 type IAFile struct {
     Name string `json:"name"`
     // Source string `json:"source"`
     Mtime       string          `json:"mtime"`
     RcloneMtime json.RawMessage `json:"rclone-mtime"`
-    UpdateTrack json.RawMessage `json:"rclone-update-track"`
     Size        string          `json:"size"`
     Md5         string          `json:"md5"`
    Crc32        string          `json:"crc32"`
     Sha1        string          `json:"sha1"`
-    Summation   string          `json:"summation"`
-
-    rawData json.RawMessage
 }
 
-// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
+// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
 type MetadataResponse struct {
     Files    []IAFile `json:"files"`
     ItemSize int64    `json:"item_size"`
 }
 
-// MetadataResponseRaw is the form of MetadataResponse to deal with metadata
-type MetadataResponseRaw struct {
-    Files    []json.RawMessage `json:"files"`
-    ItemSize int64             `json:"item_size"`
-}
-
 // ModMetadataResponse represents response for amending metadata
 type ModMetadataResponse struct {
     // https://archive.org/services/docs/api/md-write.html#example
@@ -339,9 +225,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     f.setRoot(root)
     f.features = (&fs.Features{
         BucketBased: true,
-        ReadMetadata: true,
-        WriteMetadata: true,
-        UserMetadata: true,
     }).Fill(ctx, f)
 
     f.srv = rest.NewClient(fshttp.NewClient(ctx))
@@ -411,7 +294,7 @@ func (o *Object) Storable() bool {
     return true
 }
 
-// SetModTime sets modTime on a particular file
+// SetModTime is not supported
 func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
     bucket, reqDir := o.split()
     if bucket == "" {
@@ -422,17 +305,18 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
     }
 
     // https://archive.org/services/docs/api/md-write.html
-    // the following code might be useful for modifying metadata of an uploaded file
-    patch := []map[string]string{
+    var patch = []interface{}{
         // we should drop it first to clear all rclone-provided mtimes
-        {
-            "op":   "remove",
-            "path": "/rclone-mtime",
-        }, {
-            "op":    "add",
-            "path":  "/rclone-mtime",
-            "value": t.Format(time.RFC3339Nano),
-        }}
+        struct {
+            Op   string `json:"op"`
+            Path string `json:"path"`
+        }{"remove", "/rclone-mtime"},
+        struct {
+            Op    string `json:"op"`
+            Path  string `json:"path"`
+            Value string `json:"value"`
+        }{"add", "/rclone-mtime", t.Format(time.RFC3339Nano)},
+    }
     res, err := json.Marshal(patch)
     if err != nil {
         return err
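For reference, a standalone sketch of what the head-side code above marshals: the two anonymous structs serialize to a JSON-Patch-style array. The timestamp is illustrative.

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

func main() {
    t := time.Date(2022, 10, 1, 12, 0, 0, 0, time.UTC)
    // Same construction as in SetModTime above.
    patch := []interface{}{
        struct {
            Op   string `json:"op"`
            Path string `json:"path"`
        }{"remove", "/rclone-mtime"},
        struct {
            Op    string `json:"op"`
            Path  string `json:"path"`
            Value string `json:"value"`
        }{"add", "/rclone-mtime", t.Format(time.RFC3339Nano)},
    }
    res, _ := json.Marshal(patch)
    fmt.Println(string(res))
    // [{"op":"remove","path":"/rclone-mtime"},{"op":"add","path":"/rclone-mtime","value":"2022-10-01T12:00:00Z"}]
}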
@@ -572,14 +456,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
bucket, bucketPath := f.split(remote)
|
bucket, bucketPath := f.split(remote)
|
||||||
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
|
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -599,7 +483,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
|
|||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
|
|
||||||
updateTracker := random.String(32)
|
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
"x-archive-auto-make-bucket": "1",
|
"x-archive-auto-make-bucket": "1",
|
||||||
"x-archive-queue-derive": "0",
|
"x-archive-queue-derive": "0",
|
||||||
@@ -612,7 +495,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
|
|||||||
"x-archive-filemeta-size": fmt.Sprint(srcObj.size),
|
"x-archive-filemeta-size": fmt.Sprint(srcObj.size),
|
||||||
// add this too for sure
|
// add this too for sure
|
||||||
"x-archive-filemeta-rclone-mtime": srcObj.modTime.Format(time.RFC3339Nano),
|
"x-archive-filemeta-rclone-mtime": srcObj.modTime.Format(time.RFC3339Nano),
|
||||||
"x-archive-filemeta-rclone-update-track": updateTracker,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// make a PUT request at (IAS3)/:item/:path without body
|
// make a PUT request at (IAS3)/:item/:path without body
|
||||||
@@ -633,7 +515,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
|
|||||||
|
|
||||||
// we can't update/find metadata here as IA will also
|
// we can't update/find metadata here as IA will also
|
||||||
// queue server-side copy as well as upload/delete.
|
// queue server-side copy as well as upload/delete.
|
||||||
return f.waitFileUpload(ctx, trimPathPrefix(path.Join(dstBucket, dstPath), f.root, f.opt.Enc), updateTracker, srcObj.size)
|
return f.waitFileUpload(ctx, trimPathPrefix(path.Join(dstBucket, dstPath), f.root, f.opt.Enc), f.getHashes(ctx, src), srcObj.size)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -760,7 +642,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
// make a GET request to (frontend)/download/:item/:path
|
// make a GET request to (frontend)/download/:item/:path
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
|
Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
|
||||||
Options: optionsFixed,
|
Options: optionsFixed,
|
||||||
}
|
}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
@@ -778,14 +660,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
modTime := src.ModTime(ctx)
|
modTime := src.ModTime(ctx)
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
updateTracker := random.String(32)
|
|
||||||
|
|
||||||
// Set the mtime in the metadata
|
// Set the mtime in the metadata
|
||||||
// internetarchive backend builds at header level as IAS3 has extension outside X-Amz-
|
// internetarchive backend builds at header level as IAS3 has extension outside X-Amz-
|
||||||
headers := map[string]string{
|
headers := map[string]string{
|
||||||
// https://github.com/jjjake/internetarchive/blob/2456376533251df9d05e0a14d796ec1ced4959f5/internetarchive/iarequest.py#L158
|
// https://github.com/jjjake/internetarchive/blob/2456376533251df9d05e0a14d796ec1ced4959f5/internetarchive/iarequest.py#L158
|
||||||
"x-amz-filemeta-rclone-mtime": modTime.Format(time.RFC3339Nano),
|
"x-amz-filemeta-rclone-mtime": modTime.Format(time.RFC3339Nano),
|
||||||
"x-amz-filemeta-rclone-update-track": updateTracker,
|
|
||||||
|
|
||||||
// we add some more headers for intuitive actions
|
// we add some more headers for intuitive actions
|
||||||
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
|
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
|
||||||
@@ -799,23 +679,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
headers["Content-Length"] = fmt.Sprintf("%d", size)
|
headers["Content-Length"] = fmt.Sprintf("%d", size)
|
||||||
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
|
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
|
||||||
}
|
}
|
||||||
var mdata fs.Metadata
|
|
||||||
mdata, err = fs.GetMetadataOptions(ctx, src, options)
|
|
||||||
if err == nil && mdata != nil {
|
|
||||||
for mk, mv := range mdata {
|
|
||||||
mk = strings.ToLower(mk)
|
|
||||||
if strings.HasPrefix(mk, "rclone-") {
|
|
||||||
fs.LogPrintf(fs.LogLevelWarning, o, "reserved metadata key %s is about to set", mk)
|
|
||||||
} else if _, ok := roMetadataKey[mk]; ok {
|
|
||||||
fs.LogPrintf(fs.LogLevelWarning, o, "setting or modifying read-only key %s is requested, skipping", mk)
|
|
||||||
continue
|
|
||||||
} else if mk == "mtime" {
|
|
||||||
// redirect to make it work
|
|
||||||
mk = "rclone-mtime"
|
|
||||||
}
|
|
||||||
headers[fmt.Sprintf("x-amz-filemeta-%s", mk)] = mv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// read the md5sum if available
|
// read the md5sum if available
|
||||||
var md5sumHex string
|
var md5sumHex string
|
||||||
@@ -849,7 +712,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
// or we have to wait for finish? (needs polling (frontend)/metadata/:item or scraping (frontend)/history/:item)
|
// or we have to wait for finish? (needs polling (frontend)/metadata/:item or scraping (frontend)/history/:item)
|
||||||
var newObj *Object
|
var newObj *Object
|
||||||
if err == nil {
|
if err == nil {
|
||||||
newObj, err = o.fs.waitFileUpload(ctx, o.remote, updateTracker, size)
|
newObj, err = o.fs.waitFileUpload(ctx, o.remote, o.fs.getHashes(ctx, src), size)
|
||||||
} else {
|
} else {
|
||||||
newObj = &Object{}
|
newObj = &Object{}
|
||||||
}
|
}
|
||||||
@@ -893,34 +756,6 @@ func (o *Object) String() string {
|
|||||||
return o.remote
|
return o.remote
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metadata returns all file metadata provided by Internet Archive
|
|
||||||
func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
|
|
||||||
if o.rawData == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
raw := make(map[string]json.RawMessage)
|
|
||||||
err = json.Unmarshal(o.rawData, &raw)
|
|
||||||
if err != nil {
|
|
||||||
// fatal: json parsing failed
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for k, v := range raw {
|
|
||||||
items, err := listOrString(v)
|
|
||||||
if len(items) == 0 || err != nil {
|
|
||||||
// skip: an entry failed to parse
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
m.Set(k, items[0])
|
|
||||||
}
|
|
||||||
// move the old mtime to an another key
|
|
||||||
if v, ok := m["mtime"]; ok {
|
|
||||||
m["rclone-ia-mtime"] = v
|
|
||||||
}
|
|
||||||
// overwrite with a correct mtime
|
|
||||||
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
for _, e := range retryErrorCodes {
|
for _, e := range retryErrorCodes {
|
||||||
@@ -947,7 +782,19 @@ func (o *Object) split() (bucket, bucketPath string) {
|
|||||||
return o.fs.split(o.remote)
|
return o.fs.split(o.remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *MetadataResponse, err error) {
|
func (f *Fs) getHashes(ctx context.Context, src fs.ObjectInfo) map[hash.Type]string {
|
||||||
|
hashMap := map[hash.Type]string{}
|
||||||
|
for _, ty := range f.Hashes().Array() {
|
||||||
|
sum, err := src.Hash(ctx, ty)
|
||||||
|
if err != nil || sum == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
hashMap[ty] = sum
|
||||||
|
}
|
||||||
|
return hashMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result MetadataResponse, err error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
// make a GET request to (frontend)/metadata/:item/
|
// make a GET request to (frontend)/metadata/:item/
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
@@ -955,15 +802,12 @@ func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *Metada
|
|||||||
Path: path.Join("/metadata/", bucket),
|
Path: path.Join("/metadata/", bucket),
|
||||||
}
|
}
|
||||||
|
|
||||||
var temp MetadataResponseRaw
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err = f.front.CallJSON(ctx, &opts, nil, &temp)
|
resp, err = f.front.CallJSON(ctx, &opts, nil, &result)
|
||||||
return f.shouldRetry(resp, err)
|
return f.shouldRetry(resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return
|
return result, err
|
||||||
}
|
|
||||||
return temp.unraw()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// list up all files/directories without any filters
|
// list up all files/directories without any filters
|
||||||
@@ -1008,7 +852,7 @@ func (f *Fs) listAllUnconstrained(ctx context.Context, bucket string) (entries f
|
|||||||
return entries, nil
|
return entries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSize int64) (ret *Object, err error) {
|
func (f *Fs) waitFileUpload(ctx context.Context, reqPath string, newHashes map[hash.Type]string, newSize int64) (ret *Object, err error) {
|
||||||
bucket, bucketPath := f.split(reqPath)
|
bucket, bucketPath := f.split(reqPath)
|
||||||
|
|
||||||
ret = &Object{
|
ret = &Object{
|
||||||
@@ -1025,10 +869,6 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
|
|||||||
ret2, ok := ret2.(*Object)
|
ret2, ok := ret2.(*Object)
|
||||||
if ok {
|
if ok {
|
||||||
ret = ret2
|
ret = ret2
|
||||||
ret.crc32 = ""
|
|
||||||
ret.md5 = ""
|
|
||||||
ret.sha1 = ""
|
|
||||||
ret.size = -1
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ret, nil
|
return ret, nil
|
||||||
@@ -1041,6 +881,9 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
|
|||||||
go func() {
|
go func() {
|
||||||
isFirstTime := true
|
isFirstTime := true
|
||||||
existed := false
|
existed := false
|
||||||
|
oldMtime := ""
|
||||||
|
oldCrc32 := ""
|
||||||
|
unreliablePassCount := 0
|
||||||
for {
|
for {
|
||||||
if !isFirstTime {
|
if !isFirstTime {
|
||||||
// depending on the queue, it takes time
|
// depending on the queue, it takes time
|
||||||
@@ -1065,6 +908,10 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
|
|||||||
if isFirstTime {
|
if isFirstTime {
|
||||||
isFirstTime = false
|
isFirstTime = false
|
||||||
existed = iaFile != nil
|
existed = iaFile != nil
|
||||||
|
if iaFile != nil {
|
||||||
|
oldMtime = iaFile.Mtime
|
||||||
|
oldCrc32 = iaFile.Crc32
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if iaFile == nil {
|
if iaFile == nil {
|
||||||
continue
|
continue
|
||||||
@@ -1078,20 +925,38 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
fileTrackers, _ := listOrString(iaFile.UpdateTrack)
|
hashMatched := true
|
||||||
trackerMatch := false
|
for tt, sum := range newHashes {
|
||||||
for _, v := range fileTrackers {
|
if tt == hash.MD5 && !hash.Equals(iaFile.Md5, sum) {
|
||||||
if v == tracker {
|
hashMatched = false
|
||||||
trackerMatch = true
|
break
|
||||||
|
}
|
||||||
|
if tt == hash.SHA1 && !hash.Equals(iaFile.Sha1, sum) {
|
||||||
|
hashMatched = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tt == hash.CRC32 && !hash.Equals(iaFile.Crc32, sum) {
|
||||||
|
hashMatched = false
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !trackerMatch {
|
if !hashMatched {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !compareSize(parseSize(iaFile.Size), newSize) {
|
if !compareSize(parseSize(iaFile.Size), newSize) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if hash.Equals(oldCrc32, iaFile.Crc32) && unreliablePassCount < 60 {
|
||||||
|
// the following two are based on a sort of "bad" assumption;
|
||||||
|
// what if the file is updated immediately, before polling?
|
||||||
|
// by limiting hits of these tests, avoid infinite loop
|
||||||
|
unreliablePassCount++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hash.Equals(iaFile.Mtime, oldMtime) && unreliablePassCount < 60 {
|
||||||
|
unreliablePassCount++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
// voila!
|
// voila!
|
||||||
retC <- struct {
|
retC <- struct {
|
||||||
@@ -1152,21 +1017,15 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
 }
 
 func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
-	ret := &Object{
+	return &Object{
 		fs: f,
 		remote: remote,
 		modTime: mtime,
 		size: size,
-		rawData: file.rawData,
+		md5: file.Md5,
+		crc32: file.Crc32,
+		sha1: file.Sha1,
 	}
-	// hashes from _files.xml (where summation != "") is different from one in other files
-	// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
-	if file.Summation == "" {
-		ret.md5 = file.Md5
-		ret.crc32 = file.Crc32
-		ret.sha1 = file.Sha1
-	}
-	return ret
 }
 
 func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
@@ -1177,24 +1036,20 @@ func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
 	return makeValidObject(f, trimPathPrefix(path.Join(bucket, file.Name), f.root, f.opt.Enc), file, mtimeTime, size)
 }
 
-func listOrString(jm json.RawMessage) (rmArray []string, err error) {
+func (file IAFile) parseMtime() (mtime time.Time) {
+	// method 1: use metadata added by rclone
+	var rmArray []string
 	// rclone-metadata can be an array or string
 	// try to deserialize it as array first
-	err = json.Unmarshal(jm, &rmArray)
+	err := json.Unmarshal(file.RcloneMtime, &rmArray)
 	if err != nil {
 		// if not, it's a string
 		dst := new(string)
-		err = json.Unmarshal(jm, dst)
+		err = json.Unmarshal(file.RcloneMtime, dst)
 		if err == nil {
 			rmArray = []string{*dst}
 		}
 	}
-	return
-}
-
-func (file IAFile) parseMtime() (mtime time.Time) {
-	// method 1: use metadata added by rclone
-	rmArray, err := listOrString(file.RcloneMtime)
 	// let's take the first value we can deserialize
 	for _, value := range rmArray {
 		mtime, err = time.Parse(time.RFC3339Nano, value)
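
The `listOrString` helper on the v1.60.1 side (the `-` lines) factors out a decode pattern the Internet Archive metadata needs: a JSON field that may hold either a single string or an array of strings. The same technique as a self-contained sketch:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // listOrString decodes a JSON value that may be either a string or an
    // array of strings into a []string.
    func listOrString(jm json.RawMessage) (out []string, err error) {
    	// try the array form first
    	if err = json.Unmarshal(jm, &out); err == nil {
    		return out, nil
    	}
    	// fall back to the single-string form
    	var s string
    	if err = json.Unmarshal(jm, &s); err == nil {
    		out = []string{s}
    	}
    	return out, err
    }

    func main() {
    	a, _ := listOrString(json.RawMessage(`["2006-01-02T15:04:05Z"]`))
    	b, _ := listOrString(json.RawMessage(`"2006-01-02T15:04:05Z"`))
    	fmt.Println(a, b) // both decode to [2006-01-02T15:04:05Z]
    }
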
@@ -1213,23 +1068,6 @@ func (file IAFile) parseMtime() (mtime time.Time) {
 	return mtime
 }
 
-func (mrr *MetadataResponseRaw) unraw() (_ *MetadataResponse, err error) {
-	var files []IAFile
-	for _, raw := range mrr.Files {
-		var parsed IAFile
-		err = json.Unmarshal(raw, &parsed)
-		if err != nil {
-			return nil, err
-		}
-		parsed.rawData = raw
-		files = append(files, parsed)
-	}
-	return &MetadataResponse{
-		Files: files,
-		ItemSize: mrr.ItemSize,
-	}, nil
-}
-
 func compareSize(a, b int64) bool {
 	if a < 0 || b < 0 {
 		// we won't compare if any of them is not known
@@ -1273,7 +1111,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }
 
-// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
+// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
 func quotePath(s string) string {
 	seg := strings.Split(s, "/")
 	newValues := []string{}
@@ -1291,5 +1129,4 @@ var (
 	_ fs.PublicLinker = &Fs{}
 	_ fs.Abouter = &Fs{}
 	_ fs.Object = &Object{}
-	_ fs.Metadataer = &Object{}
 )

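
`quotePath` (unchanged here apart from the comment spelling) splits on "/" so only the segments get escaped, as Python's urllib.parse.quote() does by default. Presumably each segment goes through url.PathEscape before the pieces are rejoined; a sketch under that assumption:

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    // quoteSegments escapes every path segment but leaves the "/"
    // separators intact (url.PathEscape alone would encode them too).
    func quoteSegments(s string) string {
    	seg := strings.Split(s, "/")
    	for i, v := range seg {
    		seg[i] = url.PathEscape(v)
    	}
    	return strings.Join(seg, "/")
    }

    func main() {
    	fmt.Println(quoteSegments("dir name/file#1.txt"))
    	// dir%20name/file%231.txt
    }
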
@@ -1,4 +1,3 @@
-// Package api provides types used by the Jottacloud API.
 package api
 
 import (

@@ -1,4 +1,3 @@
-// Package jottacloud provides an interface to the Jottacloud storage system.
 package jottacloud
 
 import (
@@ -47,9 +46,9 @@ const (
 	decayConstant = 2 // bigger for slower decay, exponential
 	defaultDevice = "Jotta"
 	defaultMountpoint = "Archive"
-	jfsURL = "https://jfs.jottacloud.com/jfs/"
+	rootURL = "https://jfs.jottacloud.com/jfs/"
 	apiURL = "https://api.jottacloud.com/"
-	wwwURL = "https://www.jottacloud.com/"
+	baseURL = "https://www.jottacloud.com/"
 	cachePrefix = "rclone-jcmd5-"
 	configDevice = "device"
 	configMountpoint = "mountpoint"
@@ -128,7 +127,7 @@ func init() {
 func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 	switch config.State {
 	case "":
-		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
+		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
 			Value: "standard",
 			Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
 		}, {
@@ -146,7 +145,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 		return fs.ConfigGoto(config.Result)
 	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
 		m.Set("configVersion", fmt.Sprint(configVersion))
-		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
+		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
 	case "standard_token":
 		loginToken := config.Result
 		m.Set(configClientID, defaultClientID)
@@ -192,7 +191,7 @@ machines.`)
 		m.Set("auth_code", "")
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_auth_code":
-		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
+		authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
 		m.Set("auth_code", authCode)
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_do_auth":
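
The only difference in the legacy_auth_code case is cosmetic: strings.ReplaceAll was added in Go 1.12 and is defined as strings.Replace with n = -1, so both sides strip every dash identically:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	code := "123-456"
    	fmt.Println(strings.ReplaceAll(code, "-", ""))  // 123456
    	fmt.Println(strings.Replace(code, "-", "", -1)) // 123456 (identical)
    }
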
@@ -263,11 +262,7 @@ machines.`)
 			},
 		})
 	case "choose_device":
-		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
-Choosing no, the default, will let you access the storage used for the archive
-section of the official Jottacloud client. If you instead want to access the
-sync or the backup section, for example, you must choose yes.`)
-
+		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
 	case "choose_device_query":
 		if config.Result != "true" {
 			m.Set(configDevice, "")
@@ -278,139 +273,43 @@ sync or the backup section, for example, you must choose yes.`)
 		if err != nil {
 			return nil, err
 		}
-		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
+		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
 		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
 
 		cust, err := getCustomerInfo(ctx, apiSrv)
 		if err != nil {
 			return nil, err
 		}
+		m.Set(configUsername, cust.Username)
 
-		acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
+		acc, err := getDriveInfo(ctx, srv, cust.Username)
 		if err != nil {
 			return nil, err
 		}
-
-		deviceNames := make([]string, len(acc.Devices))
-		for i, dev := range acc.Devices {
-			if i > 0 && dev.Name == defaultDevice {
-				// Insert the special Jotta device as first entry, making it the default choice.
-				copy(deviceNames[1:i+1], deviceNames[0:i])
-				deviceNames[0] = dev.Name
-			} else {
-				deviceNames[i] = dev.Name
-			}
-		}
-
-		help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used,
-which contains predefined mountpoints for archive, sync etc. All other devices
-are treated as backup devices by the official Jottacloud client. You may create
-a new by entering a unique name.`, defaultDevice)
-		return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) {
-			return deviceNames[i], ""
+		return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
+			return acc.Devices[i].Name, ""
 		})
 	case "choose_device_result":
 		device := config.Result
+		m.Set(configDevice, device)
 
 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
 			return nil, err
 		}
-		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
-		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
+		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
 
-		cust, err := getCustomerInfo(ctx, apiSrv)
+		username, _ := m.Get(configUsername)
+		dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
 		if err != nil {
 			return nil, err
 		}
-
-		acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
-		if err != nil {
-			return nil, err
-		}
-		isNew := true
-		for _, dev := range acc.Devices {
-			if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite)
-				device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead
-				isNew = false
-				break
-			}
-		}
-		var dev *api.JottaDevice
-		if isNew {
-			fs.Debugf(nil, "Creating new device: %s", device)
-			dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
-			if err != nil {
-				return nil, err
-			}
-		}
-		m.Set(configDevice, device)
-
-		if !isNew {
-			dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		var help string
-		if device == defaultDevice {
-			// With built-in Jotta device the mountpoint choice is exclusive,
-			// we do not want to risk any problems by creating new mountpoints on it.
-			help = fmt.Sprintf(`The mountpoint to use on the built-in device %s.
-The standard setup is to use the %s mountpoint. Most other mountpoints
-have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint)
-			return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
-				return dev.MountPoints[i].Name, ""
-			})
-		}
-		help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s.
-You may create a new by entering a unique name.`, device)
-		return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
+		return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
 			return dev.MountPoints[i].Name, ""
 		})
 	case "choose_device_mountpoint":
 		mountpoint := config.Result
 
-		oAuthClient, _, err := getOAuthClient(ctx, name, m)
-		if err != nil {
-			return nil, err
-		}
-		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
-		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
-
-		cust, err := getCustomerInfo(ctx, apiSrv)
-		if err != nil {
-			return nil, err
-		}
-
-		device, _ := m.Get(configDevice)
-
-		dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
-		if err != nil {
-			return nil, err
-		}
-		isNew := true
-		for _, mnt := range dev.MountPoints {
-			if strings.EqualFold(mnt.Name, mountpoint) {
-				mountpoint = mnt.Name
-				isNew = false
-				break
-			}
-		}
-
-		if isNew {
-			if device == defaultDevice {
-				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
-			}
-			fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
-			_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
-			if err != nil {
-				return nil, err
-			}
-		}
 		m.Set(configMountpoint, mountpoint)
 
 		return fs.ConfigGoto("end")
 	case "end":
 		// All the config flows end up here in case we need to carry on with something
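
The deleted loop in choose_device_result uses a copy-and-shift trick to float the built-in Jotta device to the top of the menu without disturbing the relative order of the other entries. The trick in isolation (device names here are made up):

    package main

    import "fmt"

    // frontDefault builds a list of device names with the default device,
    // when present, inserted as the first entry so it becomes the default
    // choice; all other names keep their original relative order.
    func frontDefault(devices []string, def string) []string {
    	names := make([]string, len(devices))
    	for i, name := range devices {
    		if i > 0 && name == def {
    			// shift everything collected so far one slot right
    			copy(names[1:i+1], names[0:i])
    			names[0] = name
    		} else {
    			names[i] = name
    		}
    	}
    	return names
    }

    func main() {
    	fmt.Println(frontDefault([]string{"Backup", "Jotta", "NAS"}, "Jotta"))
    	// [Jotta Backup NAS]
    }
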
@@ -438,9 +337,8 @@ type Fs struct {
 	user string
 	opt Options
 	features *fs.Features
-	fileEndpoint string
-	allocateEndpoint string
-	jfsSrv *rest.Client
+	endpointURL string
+	srv *rest.Client
 	apiSrv *rest.Client
 	pacer *fs.Pacer
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -690,47 +588,15 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
 	return info, nil
 }
 
-// createDevice makes a device
-func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
-	opts := rest.Opts{
-		Method: "POST",
-		Path: urlPathEscape(path),
-		Parameters: url.Values{},
-	}
-
-	opts.Parameters.Set("type", "WORKSTATION")
-
-	_, err = srv.CallXML(ctx, &opts, nil, &info)
-	if err != nil {
-		return nil, fmt.Errorf("couldn't create device: %w", err)
-	}
-	return info, nil
-}
-
-// createMountPoint makes a mount point
-func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
-	opts := rest.Opts{
-		Method: "POST",
-		Path: urlPathEscape(path),
-	}
-
-	_, err = srv.CallXML(ctx, &opts, nil, &info)
-	if err != nil {
-		return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
-	}
-	return info, nil
-}
-
-// setEndpoints generates the API endpoints
-func (f *Fs) setEndpoints() {
+// setEndpointURL generates the API endpoint URL
+func (f *Fs) setEndpointURL() {
 	if f.opt.Device == "" {
 		f.opt.Device = defaultDevice
 	}
 	if f.opt.Mountpoint == "" {
 		f.opt.Mountpoint = defaultMountpoint
 	}
-	f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
-	f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
+	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }
 
 // readMetaDataForPath reads the metadata from the path
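
setEndpoints on the v1.60.1 side derives two prefixes from the same device/mountpoint pair; only the first path element differs. A sketch of the values it produces (the username is a placeholder):

    package main

    import (
    	"fmt"
    	"path"
    )

    func main() {
    	// File listing goes under the username, while upload allocation
    	// goes under /jfs; everything else is shared.
    	user, device, mountpoint := "user@example.com", "Jotta", "Archive"
    	fileEndpoint := path.Join(user, device, mountpoint)
    	allocateEndpoint := path.Join("/jfs", device, mountpoint)
    	fmt.Println(fileEndpoint)     // user@example.com/Jotta/Archive
    	fmt.Println(allocateEndpoint) // /jfs/Jotta/Archive
    }
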
@@ -742,7 +608,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
 	var result api.JottaFile
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
 		return shouldRetry(ctx, resp, err)
 	})
 
@@ -783,34 +649,17 @@ func errorHandler(resp *http.Response) error {
 
 // Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
 func urlPathEscape(in string) string {
-	return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
+	return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
 }
 
 // filePathRaw returns an unescaped file path (f.root, file)
-// Optionally made absolute by prefixing with "/", typically required when used
-// as request parameter instead of the path (which is relative to some root url).
-func (f *Fs) filePathRaw(file string, absolute bool) string {
-	prefix := ""
-	if absolute {
-		prefix = "/"
-	}
-	return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
+func (f *Fs) filePathRaw(file string) string {
+	return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
 }
 
 // filePath returns an escaped file path (f.root, file)
 func (f *Fs) filePath(file string) string {
-	return urlPathEscape(f.filePathRaw(file, false))
-}
-
-// allocatePathRaw returns an unescaped allocate file path (f.root, file)
-// Optionally made absolute by prefixing with "/", typically required when used
-// as request parameter instead of the path (which is relative to some root url).
-func (f *Fs) allocatePathRaw(file string, absolute bool) string {
-	prefix := ""
-	if absolute {
-		prefix = "/"
-	}
-	return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
+	return urlPathEscape(f.filePathRaw(file))
 }
 
 // Jottacloud requires the grant_type 'refresh_token' string
@@ -843,12 +692,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 	if ok {
 		ver, err = strconv.Atoi(version)
 		if err != nil {
-			return nil, nil, errors.New("failed to parse config version")
+			return nil, nil, errors.New("Failed to parse config version")
 		}
 		ok = (ver == configVersion) || (ver == legacyConfigVersion)
 	}
 	if !ok {
-		return nil, nil, errors.New("outdated config - please reconfigure this backend")
+		return nil, nil, errors.New("Outdated config - please reconfigure this backend")
 	}
 
 	baseClient := fshttp.NewClient(ctx)
@@ -894,7 +743,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 	// Create OAuth Client
 	oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to configure Jottacloud oauth client: %w", err)
+		return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
 	}
 	return oAuthClient, ts, nil
 }
@@ -920,7 +769,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name: name,
 		root: root,
 		opt: *opt,
-		jfsSrv: rest.NewClient(oAuthClient).SetRoot(jfsURL),
+		srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
 		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
 		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
@@ -930,7 +779,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ReadMimeType: true,
 		WriteMimeType: false,
 	}).Fill(ctx, f)
-	f.jfsSrv.SetErrorHandler(errorHandler)
+	f.srv.SetErrorHandler(errorHandler)
 	if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
 		f.features.ListR = nil
 	}
@@ -949,7 +798,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 	f.user = cust.Username
-	f.setEndpoints()
+	f.setEndpointURL()
 
 	if root != "" && !rootIsDir {
 		// Check to see if the root actually an existing file
@@ -1015,7 +864,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
 	opts.Parameters.Set("mkDir", "true")
 
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &jf)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1044,7 +893,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var resp *http.Response
 	var result api.JottaFolder
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
 		return shouldRetry(ctx, resp, err)
 	})
 
@@ -1083,7 +932,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return entries, nil
 }
 
-func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {
+func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
 
 	type stats struct {
 		Folders int `xml:"folders"`
@@ -1119,12 +968,8 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
 		})
 	}
 
-	// liststream paths are /mountpoint/root/path
-	// so the returned paths should have /mountpoint/root/ trimmed
-	// as the caller is expecting path.
-	pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
 	trimPathPrefix := func(p string) string {
-		p = strings.TrimPrefix(p, pathPrefix)
+		p = strings.TrimPrefix(p, trimPrefix)
 		p = strings.TrimPrefix(p, "/")
 		return p
 	}
@@ -1181,7 +1026,7 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
 
 	if expected.Folders != actual.Folders ||
 		expected.Files != actual.Files {
-		return fmt.Errorf("invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
+		return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
 	}
 	return nil
 }
@@ -1202,12 +1047,16 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.Call(ctx, &opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		if err != nil {
 			return shouldRetry(ctx, resp, err)
 		}
 
-		err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
+		// liststream paths are /mountpoint/root/path
+		// so the returned paths should have /mountpoint/root/ trimmed
+		// as the caller is expecting path.
+		trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
+		err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
 			if d.Remote() == dir {
 				return nil
 			}
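
Both versions normalise liststream results the same way; they only disagree on where the prefix is computed. The trimming itself looks like this (mountpoint and root are example values):

    package main

    import (
    	"fmt"
    	"path"
    	"strings"
    )

    func main() {
    	// liststream returns absolute /mountpoint/root/... paths; the
    	// caller wants them relative, so the prefix and any remaining
    	// leading "/" are trimmed.
    	trimPrefix := path.Join("/", "Archive", "backup")
    	rel := func(p string) string {
    		p = strings.TrimPrefix(p, trimPrefix)
    		return strings.TrimPrefix(p, "/")
    	}
    	fmt.Println(rel("/Archive/backup/photos/a.jpg")) // photos/a.jpg
    }
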
@@ -1248,10 +1097,13 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	if f.opt.Device != "Jotta" {
+		return nil, errors.New("upload not supported for devices other than Jotta")
+	}
 	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
 	return o, o.Update(ctx, in, src, options...)
 }
@@ -1261,7 +1113,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
 	// defer log.Trace(dirPath, "")("")
 	// chop off trailing / if it exists
-	parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
+	if strings.HasSuffix(dirPath, "/") {
+		dirPath = dirPath[:len(dirPath)-1]
+	}
+	parent := path.Dir(dirPath)
 	if parent == "." {
 		parent = ""
 	}
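
The mkParentDir change is a pure refactor: strings.TrimSuffix collapses the explicit HasSuffix/reslice dance into one call. Equivalent behaviour, sketched:

    package main

    import (
    	"fmt"
    	"path"
    	"strings"
    )

    // parentOf chops a trailing "/" and returns the parent directory; this
    // is the one-line equivalent of the explicit HasSuffix/slice form.
    func parentOf(dirPath string) string {
    	parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
    	if parent == "." {
    		parent = ""
    	}
    	return parent
    }

    func main() {
    	fmt.Println(parentOf("a/b/c/")) // a/b
    	fmt.Println(parentOf("a"))      // (empty string)
    }
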
@@ -1309,7 +1164,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.Call(ctx, &opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1362,7 +1217,7 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time,
 
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
 		return shouldRetry(ctx, resp, err)
 	})
 
@@ -1383,11 +1238,11 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set(method, f.filePathRaw(dest, true))
+	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
 
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1398,9 +1253,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1418,7 +1273,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
 
-	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
+	// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
 	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
 		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
 		info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1434,9 +1289,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1496,7 +1351,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return fs.ErrorDirExists
 	}
 
-	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.fileEndpoint, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
+	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
 
 	if err != nil {
 		return fmt.Errorf("couldn't move directory: %w", err)
@@ -1521,7 +1376,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var resp *http.Response
 	var result api.JottaFile
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
 		return shouldRetry(ctx, resp, err)
 	})
 
@@ -1547,19 +1402,19 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return "", errors.New("couldn't create public link - no uri received")
 	}
 	if result.PublicSharePath != "" {
-		webLink := joinPath(wwwURL, result.PublicSharePath)
+		webLink := joinPath(baseURL, result.PublicSharePath)
 		fs.Debugf(nil, "Web link: %s", webLink)
 	} else {
 		fs.Debugf(nil, "No web link received")
 	}
-	directLink := joinPath(wwwURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
+	directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
 	fs.Debugf(nil, "Direct link: %s", directLink)
 	return directLink, nil
 }
 
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
-	info, err := getDriveInfo(ctx, f.jfsSrv, f.user)
+	info, err := getDriveInfo(ctx, f.srv, f.user)
 	if err != nil {
 		return nil, err
 	}
@@ -1762,7 +1617,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	opts.Parameters.Set("mode", "bin")
 
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.jfsSrv.Call(ctx, &opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1830,7 +1685,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1883,7 +1738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Created: fileDate,
 		Modified: fileDate,
 		Md5: md5String,
-		Path: o.fs.allocatePathRaw(o.remote, true),
+		Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
 	}
 
 	// send it
@@ -1953,7 +1808,7 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
 	}
 
 	return o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
+		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
 		return shouldRetry(ctx, resp, err)
 	})
 }

@@ -1,4 +1,3 @@
-// Package koofr provides an interface to the Koofr storage system.
 package koofr
 
 import (
@@ -352,9 +351,9 @@ func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff
 	}
 	if f.mountID == "" {
 		if opt.MountID == "" {
-			return nil, errors.New("failed to find primary mount")
+			return nil, errors.New("Failed to find primary mount")
 		}
-		return nil, errors.New("failed to find mount " + opt.MountID)
+		return nil, errors.New("Failed to find mount " + opt.MountID)
 	}
 	rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
 	if err == nil && rootFile.Type != "dir" {
@@ -668,7 +667,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 //
 // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
 //
-// I am not sure about meaning of "path" parameter; in my experiments
+// I am not sure about meaning of "path" parameter; in my expriments
 // it is always "%2F", and omitting it or putting any other value
 // results in 404.
 //

@@ -17,12 +17,8 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var available, total, free int64
-	root, e := syscall.UTF16PtrFromString(f.root)
-	if e != nil {
-		return nil, fmt.Errorf("failed to read disk usage: %w", e)
-	}
 	_, _, e1 := getFreeDiskSpace.Call(
-		uintptr(unsafe.Pointer(root)),
+		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
 		uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
 		uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
 		uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes

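
The v1.60.1 side uses syscall.UTF16PtrFromString, which reports an embedded NUL byte as an error, whereas the deprecated syscall.StringToUTF16Ptr panics in that case. The error-returning form in isolation (Windows-only, hence the build tag):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	// Returns an error instead of panicking if the string contains
    	// a NUL byte, so a bad path cannot crash the caller.
    	p, err := syscall.UTF16PtrFromString(`C:\data`)
    	if err != nil {
    		fmt.Println("bad path:", err)
    		return
    	}
    	_ = p // pass to the Windows API call
    }
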
@@ -22,7 +22,6 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/encoder"
@@ -43,22 +42,9 @@ func init() {
 		Description: "Local Disk",
 		NewFs: NewFs,
 		CommandHelp: commandHelp,
-		MetadataInfo: &fs.MetadataInfo{
-			System: systemMetadataInfo,
-			Help: `Depending on which OS is in use the local backend may return only some
-of the system metadata. Setting system metadata is supported on all
-OSes but setting user metadata is only supported on linux, freebsd,
-netbsd, macOS and Solaris. It is **not** supported on Windows yet
-([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).
-
-User metadata is stored as extended attributes (which may not be
-supported by all file systems) under the "user.*" prefix.
-`,
-		},
 		Options: []fs.Option{{
 			Name: "nounc",
 			Help: "Disable UNC (long path names) conversion on Windows.",
-			Default: false,
 			Advanced: runtime.GOOS != "windows",
 			Examples: []fs.OptionExample{{
 				Value: "true",
@@ -244,7 +230,6 @@ type Fs struct {
 	precision time.Duration // precision of local filesystem
 	warnedMu sync.Mutex // used for locking access to 'warned'.
 	warned map[string]struct{} // whether we have warned about this string
-	xattrSupported int32 // whether xattrs are supported (atomic access)
 
 	// do os.Lstat or os.Stat
 	lstat func(name string) (os.FileInfo, error)
@@ -288,19 +273,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		dev: devUnset,
 		lstat: os.Lstat,
 	}
-	if xattrSupported {
-		f.xattrSupported = 1
-	}
 	f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
 	f.features = (&fs.Features{
 		CaseInsensitive: f.caseInsensitive(),
 		CanHaveEmptyDirectories: true,
 		IsLocal: true,
 		SlowHash: true,
-		ReadMetadata: true,
-		WriteMetadata: true,
-		UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
-		FilterAware: true,
 	}).Fill(ctx, f)
 	if opt.FollowSymlinks {
 		f.lstat = os.Stat
@@ -445,8 +423,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
-
 	fsDirPath := f.localPath(dir)
 	_, err = os.Stat(fsDirPath)
 	if err != nil {
@@ -497,13 +473,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			continue
 		}
 		if fierr != nil {
-			// Don't report errors on any file names that are excluded
-			if useFilter {
-				newRemote := f.cleanRemote(dir, name)
-				if !filter.IncludeRemote(newRemote) {
-					continue
-				}
-			}
 			err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
 			fs.Errorf(dir, "%v", fierr)
 			_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
@@ -537,11 +506,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			}
 			mode = fi.Mode()
 		}
-		// Don't include non directory if not included
-		// we leave directory filtering to the layer above
-		if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
-			continue
-		}
 		if fi.IsDir() {
 			// Ignore directories which are symlinks. These are junction points under windows which
 			// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -715,9 +679,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -939,7 +903,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 		return "", fmt.Errorf("hash: failed to open: %w", err)
 	}
 	var hashes map[hash.Type]string
-	hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
+	hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
 	closeErr := in.Close()
 	if err != nil {
 		return "", fmt.Errorf("hash: failed to read: %w", err)
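
readers.NewContextReader on the v1.60.1 side wraps the file so that hashing stops once the context is cancelled. The general shape of such a wrapper (a sketch, not rclone's actual implementation):

    package main

    import (
    	"context"
    	"fmt"
    	"io"
    	"strings"
    )

    // ctxReader aborts a long read early once its context is cancelled.
    type ctxReader struct {
    	ctx context.Context
    	r   io.Reader
    }

    func (c ctxReader) Read(p []byte) (int, error) {
    	// check for cancellation before every underlying read
    	if err := c.ctx.Err(); err != nil {
    		return 0, err
    	}
    	return c.r.Read(p)
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	cancel()
    	_, err := io.ReadAll(ctxReader{ctx: ctx, r: strings.NewReader("data")})
    	fmt.Println(err) // context canceled
    }
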
@@ -973,22 +937,17 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.modTime
 }
 
-// Set the atime and ltime of the object
-func (o *Object) setTimes(atime, mtime time.Time) (err error) {
-	if o.translatedLink {
-		err = lChtimes(o.path, atime, mtime)
-	} else {
-		err = os.Chtimes(o.path, atime, mtime)
-	}
-	return err
-}
-
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	if o.fs.opt.NoSetModTime {
 		return nil
 	}
-	err := o.setTimes(modTime, modTime)
+	var err error
+	if o.translatedLink {
+		err = lChtimes(o.path, modTime, modTime)
+	} else {
+		err = os.Chtimes(o.path, modTime, modTime)
+	}
 	if err != nil {
 		return err
 	}
@@ -1263,16 +1222,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// Fetch and set metadata if --metadata is in use
-	meta, err := fs.GetMetadataOptions(ctx, src, options)
-	if err != nil {
-		return fmt.Errorf("failed to read metadata from source object: %w", err)
-	}
-	err = o.writeMetadata(meta)
-	if err != nil {
-		return fmt.Errorf("failed to set metadata: %w", err)
-	}
-
 	// ReRead info now that we have finished
 	return o.lstat()
 }
@@ -1371,56 +1320,31 @@ func (o *Object) Remove(ctx context.Context) error {
 	return remove(o.path)
 }
 
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
-	metadata, err = o.getXattr()
-	if err != nil {
-		return nil, err
-	}
-	err = o.readMetadataFromFile(&metadata)
-	if err != nil {
-		return nil, err
-	}
-	return metadata, nil
-}
-
-// Write the metadata on the object
-func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
-	err = o.setXattr(metadata)
-	if err != nil {
-		return err
-	}
-	err = o.writeMetadataToFile(metadata)
-	if err != nil {
-		return err
-	}
-	return err
-}
-
 func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
-	if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
-		if !filepath.IsAbs(s) {
+	if runtime.GOOS == "windows" {
+		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
 			s2, err := filepath.Abs(s)
 			if err == nil {
 				s = s2
 			}
-		} else {
-			s = filepath.Clean(s)
 		}
-	}
-	if runtime.GOOS == "windows" {
 		s = filepath.ToSlash(s)
 		vol := filepath.VolumeName(s)
 		s = vol + enc.FromStandardPath(s[len(vol):])
 		s = filepath.FromSlash(s)
+
 		if !noUNC {
 			// Convert to UNC
 			s = file.UNCPath(s)
 		}
 		return s
 	}
+	if !filepath.IsAbs(s) {
+		s2, err := filepath.Abs(s)
+		if err == nil {
+			s = s2
+		}
+	}
 	s = enc.FromStandardPath(s)
 	return s
 }
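
On the non-Windows path the two versions differ in when they clean: v1.60.1 runs filepath.Clean on roots that are already absolute, while the other side leaves them as given. The relevant standard-library behaviour (the Abs output depends on the working directory):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	// Abs resolves against the working directory and cleans the
    	// result; Clean only normalizes ".." and duplicate separators.
    	abs, _ := filepath.Abs("data/../backup")
    	fmt.Println(abs) // e.g. /home/user/backup
    	fmt.Println(filepath.Clean("/a//b/../c")) // /a/c
    }
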
@@ -1435,5 +1359,4 @@ var (
 	_ fs.Commander = &Fs{}
 	_ fs.OpenWriterAter = &Fs{}
 	_ fs.Object = &Object{}
-	_ fs.Metadataer = &Object{}
 )

@@ -3,19 +3,15 @@ package local
 import (
 	"bytes"
 	"context"
-	"fmt"
 	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
-	"sort"
 	"testing"
 	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
@@ -192,7 +188,7 @@ func TestHashOnUpdate(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
 
-	// Reupload it with different contents but same size and timestamp
+	// Reupload it with diferent contents but same size and timestamp
 	var b = bytes.NewBufferString("CONTENT")
 	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
 	err = o.Update(ctx, b, src)
@@ -233,171 +229,3 @@ func TestHashOnDelete(t *testing.T) {
 	_, err = o.Hash(ctx, hash.MD5)
 	require.Error(t, err)
 }
-
-func TestMetadata(t *testing.T) {
-	ctx := context.Background()
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	const filePath = "metafile.txt"
-	when := time.Now()
-	const dayLength = len("2001-01-01")
-	whenRFC := when.Format(time.RFC3339Nano)
-	r.WriteFile(filePath, "metadata file contents", when)
-	f := r.Flocal.(*Fs)
-
-	// Get the object
-	obj, err := f.NewObject(ctx, filePath)
-	require.NoError(t, err)
-	o := obj.(*Object)
-
-	features := f.Features()
-
-	var hasXID, hasAtime, hasBtime bool
-	switch runtime.GOOS {
-	case "darwin", "freebsd", "netbsd", "linux":
-		hasXID, hasAtime, hasBtime = true, true, true
-	case "openbsd", "solaris":
-		hasXID, hasAtime = true, true
-	case "windows":
-		hasAtime, hasBtime = true, true
-	case "plan9", "js":
-		// nada
-	default:
-		t.Errorf("No test cases for OS %q", runtime.GOOS)
-	}
-
-	assert.True(t, features.ReadMetadata)
-	assert.True(t, features.WriteMetadata)
-	assert.Equal(t, xattrSupported, features.UserMetadata)
-
-	t.Run("Xattr", func(t *testing.T) {
-		if !xattrSupported {
-			t.Skip()
-		}
-		m, err := o.getXattr()
-		require.NoError(t, err)
-		assert.Nil(t, m)
-
-		inM := fs.Metadata{
-			"potato": "chips",
-			"cabbage": "soup",
-		}
-		err = o.setXattr(inM)
-		require.NoError(t, err)
-
-		m, err = o.getXattr()
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-		assert.Equal(t, inM, m)
-	})
-
-	checkTime := func(m fs.Metadata, key string, when time.Time) {
-		mt, ok := o.parseMetadataTime(m, key)
-		assert.True(t, ok)
-		dt := mt.Sub(when)
-		precision := time.Second
-		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
-	}
-
-	checkInt := func(m fs.Metadata, key string, base int) int {
-		value, ok := o.parseMetadataInt(m, key, base)
-		assert.True(t, ok)
-		return value
-	}
-	t.Run("Read", func(t *testing.T) {
-		m, err := o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-
-		// All OSes have these
-		checkInt(m, "mode", 8)
-		checkTime(m, "mtime", when)
-
-		assert.Equal(t, len(whenRFC), len(m["mtime"]))
-		assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
-
-		if hasAtime {
-			checkTime(m, "atime", when)
-		}
-		if hasBtime {
-			checkTime(m, "btime", when)
-		}
-		if hasXID {
-			checkInt(m, "uid", 10)
-			checkInt(m, "gid", 10)
-		}
-	})
-
-	t.Run("Write", func(t *testing.T) {
-		newAtimeString := "2011-12-13T14:15:16.999999999Z"
-		newAtime := fstest.Time(newAtimeString)
-		newMtimeString := "2011-12-12T14:15:16.999999999Z"
-		newMtime := fstest.Time(newMtimeString)
-		newBtimeString := "2011-12-11T14:15:16.999999999Z"
-		newBtime := fstest.Time(newBtimeString)
-		newM := fs.Metadata{
-			"mtime": newMtimeString,
-			"atime": newAtimeString,
-			"btime": newBtimeString,
-			// Can't test uid, gid without being root
-			"mode": "0767",
-			"potato": "wedges",
-		}
-		err := o.writeMetadata(newM)
-		require.NoError(t, err)
-
-		m, err := o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-
-		mode := checkInt(m, "mode", 8)
-		if runtime.GOOS != "windows" {
|
|
||||||
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
|
|
||||||
}
|
|
||||||
|
|
||||||
checkTime(m, "mtime", newMtime)
|
|
||||||
if hasAtime {
|
|
||||||
checkTime(m, "atime", newAtime)
|
|
||||||
}
|
|
||||||
if haveSetBTime {
|
|
||||||
checkTime(m, "btime", newBtime)
|
|
||||||
}
|
|
||||||
if xattrSupported {
|
|
||||||
assert.Equal(t, "wedges", m["potato"])
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
r := fstest.NewRun(t)
|
|
||||||
defer r.Finalise()
|
|
||||||
when := time.Now()
|
|
||||||
r.WriteFile("included", "included file", when)
|
|
||||||
r.WriteFile("excluded", "excluded file", when)
|
|
||||||
f := r.Flocal.(*Fs)
|
|
||||||
|
|
||||||
// Check set up for filtering
|
|
||||||
assert.True(t, f.Features().FilterAware)
|
|
||||||
|
|
||||||
// Add a filter
|
|
||||||
ctx, fi := filter.AddConfig(ctx)
|
|
||||||
require.NoError(t, fi.AddRule("+ included"))
|
|
||||||
require.NoError(t, fi.AddRule("- *"))
|
|
||||||
|
|
||||||
// Check listing without use filter flag
|
|
||||||
entries, err := f.List(ctx, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
sort.Sort(entries)
|
|
||||||
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
|
|
||||||
|
|
||||||
// Add user filter flag
|
|
||||||
ctx = filter.SetUseFilter(ctx, true)
|
|
||||||
|
|
||||||
// Check listing with use filter flag
|
|
||||||
entries, err = f.List(ctx, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
sort.Sort(entries)
|
|
||||||
require.Equal(t, "[included]", fmt.Sprint(entries))
|
|
||||||
}
|
|
||||||
|
|||||||
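The deleted TestMetadata above compares only the date prefix of the RFC 3339 mtime string (the first dayLength characters); that check in isolation, runnable anywhere:

package main

import (
    "fmt"
    "time"
)

func main() {
    when := time.Now().UTC()
    whenRFC := when.Format(time.RFC3339Nano) // e.g. "2011-12-12T14:15:16.999999999Z"
    const dayLength = len("2001-01-01")
    fmt.Println(whenRFC[:dayLength]) // only the date prefix is compared
}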
@@ -13,6 +13,5 @@ func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "",
         NilObject:  (*local.Object)(nil),
-        QuickTestOK: true,
     })
 }

@@ -1,138 +0,0 @@
-package local
-
-import (
-    "fmt"
-    "os"
-    "runtime"
-    "strconv"
-    "time"
-
-    "github.com/rclone/rclone/fs"
-)
-
-const metadataTimeFormat = time.RFC3339Nano
-
-// system metadata keys which this backend owns
-//
-// not all values supported on all OSes
-var systemMetadataInfo = map[string]fs.MetadataHelp{
-    "mode": {
-        Help:    "File type and mode",
-        Type:    "octal, unix style",
-        Example: "0100664",
-    },
-    "uid": {
-        Help:    "User ID of owner",
-        Type:    "decimal number",
-        Example: "500",
-    },
-    "gid": {
-        Help:    "Group ID of owner",
-        Type:    "decimal number",
-        Example: "500",
-    },
-    "rdev": {
-        Help:    "Device ID (if special file)",
-        Type:    "hexadecimal",
-        Example: "1abc",
-    },
-    "atime": {
-        Help:    "Time of last access",
-        Type:    "RFC 3339",
-        Example: "2006-01-02T15:04:05.999999999Z07:00",
-    },
-    "mtime": {
-        Help:    "Time of last modification",
-        Type:    "RFC 3339",
-        Example: "2006-01-02T15:04:05.999999999Z07:00",
-    },
-    "btime": {
-        Help:    "Time of file birth (creation)",
-        Type:    "RFC 3339",
-        Example: "2006-01-02T15:04:05.999999999Z07:00",
-    },
-}
-
-// parse a time string from metadata with key
-func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
-    value, ok := m[key]
-    if ok {
-        var err error
-        t, err = time.Parse(metadataTimeFormat, value)
-        if err != nil {
-            fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
-            ok = false
-        }
-    }
-    return t, ok
-}
-
-// parse am int from metadata with key and base
-func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
-    value, ok := m[key]
-    if ok {
-        var err error
-        result64, err := strconv.ParseInt(value, base, 64)
-        if err != nil {
-            fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
-            ok = false
-        }
-        result = int(result64)
-    }
-    return result, ok
-}
-
-// Write the metadata into the file
-//
-// It isn't possible to set the ctime and btime under Unix
-func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
-    var err error
-    atime, atimeOK := o.parseMetadataTime(m, "atime")
-    mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
-    btime, btimeOK := o.parseMetadataTime(m, "btime")
-    if atimeOK || mtimeOK {
-        if atimeOK && !mtimeOK {
-            mtime = atime
-        }
-        if !atimeOK && mtimeOK {
-            atime = mtime
-        }
-        err = o.setTimes(atime, mtime)
-        if err != nil {
-            outErr = fmt.Errorf("failed to set times: %w", err)
-        }
-    }
-    if haveSetBTime {
-        if btimeOK {
-            err = setBTime(o.path, btime)
-            if err != nil {
-                outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
-            }
-        }
-    }
-    uid, hasUID := o.parseMetadataInt(m, "uid", 10)
-    gid, hasGID := o.parseMetadataInt(m, "gid", 10)
-    if hasUID {
-        // FIXME should read UID and GID of current user and only attempt to set it if different
-        if !hasGID {
-            gid = uid
-        }
-        if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
-            fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
-        } else {
-            err = os.Chown(o.path, uid, gid)
-            if err != nil {
-                outErr = fmt.Errorf("failed to change ownership: %w", err)
-            }
-        }
-    }
-    mode, hasMode := o.parseMetadataInt(m, "mode", 8)
-    if hasMode {
-        err = os.Chmod(o.path, os.FileMode(mode))
-        if err != nil {
-            outErr = fmt.Errorf("failed to change permissions: %w", err)
-        }
-    }
-    // FIXME not parsing rdev yet
-    return outErr
-}
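Note how parseMetadataInt is called with base 8 for "mode": the octal string round-trips through strconv.ParseInt into an os.FileMode. A standalone illustration:

package main

import (
    "fmt"
    "os"
    "strconv"
)

func main() {
    // "mode" is stored as an octal string, e.g. "0767"
    result64, err := strconv.ParseInt("0767", 8, 64)
    if err != nil {
        panic(err)
    }
    mode := os.FileMode(result64)
    fmt.Printf("%d %v\n", result64, mode) // 503 -rwxrw-rwx
}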
@@ -1,38 +0,0 @@
-//go:build darwin || freebsd || netbsd
-// +build darwin freebsd netbsd
-
-package local
-
-import (
-    "fmt"
-    "syscall"
-    "time"
-
-    "github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-    info, err := o.fs.lstat(o.path)
-    if err != nil {
-        return err
-    }
-    stat, ok := info.Sys().(*syscall.Stat_t)
-    if !ok {
-        fs.Debugf(o, "didn't return Stat_t as expected")
-        return nil
-    }
-    m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-    m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-    m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-    if stat.Rdev != 0 {
-        m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-    }
-    setTime := func(key string, t syscall.Timespec) {
-        m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
-    }
-    setTime("atime", stat.Atimespec)
-    setTime("mtime", stat.Mtimespec)
-    setTime("btime", stat.Birthtimespec)
-    return nil
-}
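The setTime helper above leans on syscall.Timespec's Unix() method, which returns exactly the (sec, nsec) pair that time.Unix expects. A standalone sketch; the Timespec field types vary by platform, so this is hedged to linux/darwin builds:

//go:build linux || darwin

package main

import (
    "fmt"
    "syscall"
    "time"
)

func main() {
    ts := syscall.Timespec{Sec: 1667900000, Nsec: 123456789}
    sec, nsec := ts.Unix() // splits into the two time.Unix arguments
    t := time.Unix(sec, nsec).UTC()
    fmt.Println(t.Format(time.RFC3339Nano))
}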
@@ -1,102 +0,0 @@
-//go:build linux
-// +build linux
-
-package local
-
-import (
-    "fmt"
-    "sync"
-    "time"
-
-    "github.com/rclone/rclone/fs"
-    "golang.org/x/sys/unix"
-)
-
-var (
-    statxCheckOnce         sync.Once
-    readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-    statxCheckOnce.Do(func() {
-        // Check statx() is available as it was only introduced in kernel 4.11
-        // If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
-        var stat unix.Statx_t
-        if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
-            readMetadataFromFileFn = readMetadataFromFileStatx
-        } else {
-            readMetadataFromFileFn = readMetadataFromFileFstatat
-        }
-    })
-    return readMetadataFromFileFn(o, m)
-}
-
-// Read the metadata from the file into metadata where possible
-func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
-    flags := unix.AT_SYMLINK_NOFOLLOW
-    if o.fs.opt.FollowSymlinks {
-        flags = 0
-    }
-    var stat unix.Statx_t
-    // statx() was added to Linux in kernel 4.11
-    err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
-        unix.STATX_TYPE | // Want stx_mode & S_IFMT
-        unix.STATX_MODE | // Want stx_mode & ~S_IFMT
-        unix.STATX_UID | // Want stx_uid
-        unix.STATX_GID | // Want stx_gid
-        unix.STATX_ATIME | // Want stx_atime
-        unix.STATX_MTIME | // Want stx_mtime
-        unix.STATX_CTIME | // Want stx_ctime
-        unix.STATX_BTIME), // Want stx_btime
-        &stat)
-    if err != nil {
-        return err
-    }
-    m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-    m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-    m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-    if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
-        m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
-    }
-    setTime := func(key string, t unix.StatxTimestamp) {
-        m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
-    }
-    setTime("atime", stat.Atime)
-    setTime("mtime", stat.Mtime)
-    setTime("btime", stat.Btime)
-    return nil
-}
-
-// Read the metadata from the file into metadata where possible
-func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
-    flags := unix.AT_SYMLINK_NOFOLLOW
-    if o.fs.opt.FollowSymlinks {
-        flags = 0
-    }
-    var stat unix.Stat_t
-    // fstatat() was added to Linux in kernel 2.6.16
-    // Go only supports 2.6.32 or later
-    err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
-    if err != nil {
-        return err
-    }
-    m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-    m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-    m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-    if stat.Rdev != 0 {
-        m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-    }
-    setTime := func(key string, t unix.Timespec) {
-        // The types of t.Sec and t.Nsec vary from int32 to int64 on
-        // different Linux architectures so we need to cast them to
-        // int64 here and hence need to quiet the linter about
-        // unecessary casts.
-        //
-        // nolint: unconvert
-        m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
-    }
-    setTime("atime", stat.Atim)
-    setTime("mtime", stat.Mtim)
-    return nil
-}

@@ -1,21 +0,0 @@
-//go:build plan9 || js
-// +build plan9 js
-
-package local
-
-import (
-    "fmt"
-
-    "github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-    info, err := o.fs.lstat(o.path)
-    if err != nil {
-        return err
-    }
-    m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
-    m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
-    return nil
-}

@@ -1,37 +0,0 @@
-//go:build openbsd || solaris
-// +build openbsd solaris
-
-package local
-
-import (
-    "fmt"
-    "syscall"
-    "time"
-
-    "github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-    info, err := o.fs.lstat(o.path)
-    if err != nil {
-        return err
-    }
-    stat, ok := info.Sys().(*syscall.Stat_t)
-    if !ok {
-        fs.Debugf(o, "didn't return Stat_t as expected")
-        return nil
-    }
-    m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-    m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-    m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-    if stat.Rdev != 0 {
-        m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-    }
-    setTime := func(key string, t syscall.Timespec) {
-        m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
-    }
-    setTime("atime", stat.Atim)
-    setTime("mtime", stat.Mtim)
-    return nil
-}

@@ -1,34 +0,0 @@
-//go:build windows
-// +build windows
-
-package local
-
-import (
-    "fmt"
-    "syscall"
-    "time"
-
-    "github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-    info, err := o.fs.lstat(o.path)
-    if err != nil {
-        return err
-    }
-    stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
-    if !ok {
-        fs.Debugf(o, "didn't return Win32FileAttributeData as expected")
-        return nil
-    }
-    // FIXME do something with stat.FileAttributes ?
-    m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
-    setTime := func(key string, t syscall.Filetime) {
-        m.Set(key, time.Unix(0, t.Nanoseconds()).Format(metadataTimeFormat))
-    }
-    setTime("atime", stat.LastAccessTime)
-    setTime("mtime", stat.LastWriteTime)
-    setTime("btime", stat.CreationTime)
-    return nil
-}

@@ -1,16 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package local
-
-import (
-    "time"
-)
-
-const haveSetBTime = false
-
-// setBTime changes the birth time of the file passed in
-func setBTime(name string, btime time.Time) error {
-    // Does nothing
-    return nil
-}

@@ -1,28 +0,0 @@
-//go:build windows
-// +build windows
-
-package local
-
-import (
-    "os"
-    "syscall"
-    "time"
-)
-
-const haveSetBTime = true
-
-// setBTime sets the birth time of the file passed in
-func setBTime(name string, btime time.Time) (err error) {
-    h, err := syscall.Open(name, os.O_RDWR, 0755)
-    if err != nil {
-        return err
-    }
-    defer func() {
-        closeErr := syscall.Close(h)
-        if err == nil {
-            err = closeErr
-        }
-    }()
-    bFileTime := syscall.NsecToFiletime(btime.UnixNano())
-    return syscall.SetFileTime(h, &bFileTime, nil, nil)
-}
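setBTime above uses a named error return so that a failure from the deferred Close is not silently dropped when the body otherwise succeeded; the same pattern in isolation, using plain files instead of the Windows syscalls:

package main

import (
    "fmt"
    "os"
)

// writeFile shows capturing a deferred Close error via a named return.
func writeFile(name string) (err error) {
    f, err := os.Create(name)
    if err != nil {
        return err
    }
    defer func() {
        closeErr := f.Close()
        if err == nil { // keep the first error, as setBTime does
            err = closeErr
        }
    }()
    _, err = f.WriteString("hello\n")
    return err
}

func main() {
    fmt.Println(writeFile(os.TempDir() + "/btime-demo.txt"))
}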
@@ -1,116 +0,0 @@
-//go:build !openbsd && !plan9
-// +build !openbsd,!plan9
-
-package local
-
-import (
-    "fmt"
-    "strings"
-    "sync/atomic"
-    "syscall"
-
-    "github.com/pkg/xattr"
-    "github.com/rclone/rclone/fs"
-)
-
-const (
-    xattrPrefix    = "user." // FIXME is this correct for all unixes?
-    xattrSupported = xattr.XATTR_SUPPORTED
-)
-
-// Check to see if the error supplied is a not supported error, and if
-// so, disable xattrs
-func (f *Fs) xattrIsNotSupported(err error) bool {
-    xattrErr, ok := err.(*xattr.Error)
-    if !ok {
-        return false
-    }
-    // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
-    if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
-        // Show xattrs not supported
-        if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
-            fs.Errorf(f, "xattrs not supported - disabling: %v", err)
-        }
-        return true
-    }
-    return false
-}
-
-// getXattr returns the extended attributes for an object
-//
-// It doesn't return any attributes owned by this backend in
-// metadataKeys
-func (o *Object) getXattr() (metadata fs.Metadata, err error) {
-    if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
-        return nil, nil
-    }
-    var list []string
-    if o.fs.opt.FollowSymlinks {
-        list, err = xattr.List(o.path)
-    } else {
-        list, err = xattr.LList(o.path)
-    }
-    if err != nil {
-        if o.fs.xattrIsNotSupported(err) {
-            return nil, nil
-        }
-        return nil, fmt.Errorf("failed to read xattr: %w", err)
-    }
-    if len(list) == 0 {
-        return nil, nil
-    }
-    metadata = make(fs.Metadata, len(list))
-    for _, k := range list {
-        var v []byte
-        if o.fs.opt.FollowSymlinks {
-            v, err = xattr.Get(o.path, k)
-        } else {
-            v, err = xattr.LGet(o.path, k)
-        }
-        if err != nil {
-            if o.fs.xattrIsNotSupported(err) {
-                return nil, nil
-            }
-            return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
-        }
-        k = strings.ToLower(k)
-        if !strings.HasPrefix(k, xattrPrefix) {
-            continue
-        }
-        k = k[len(xattrPrefix):]
-        if _, found := systemMetadataInfo[k]; found {
-            continue
-        }
-        metadata[k] = string(v)
-    }
-    return metadata, nil
-}
-
-// setXattr sets the metadata on the file Xattrs
-//
-// It doesn't set any attributes owned by this backend in metadataKeys
-func (o *Object) setXattr(metadata fs.Metadata) (err error) {
-    if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
-        return nil
-    }
-    for k, value := range metadata {
-        k = strings.ToLower(k)
-        if _, found := systemMetadataInfo[k]; found {
-            continue
-        }
-        k = xattrPrefix + k
-        v := []byte(value)
-        if o.fs.opt.FollowSymlinks {
-            err = xattr.Set(o.path, k, v)
-        } else {
-            err = xattr.LSet(o.path, k, v)
-        }
-        if err != nil {
-            if o.fs.xattrIsNotSupported(err) {
-                return nil
-            }
-            return fmt.Errorf("failed to set xattr key %q: %w", k, err)
-        }
-    }
-    return nil
-}
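The file above is built on github.com/pkg/xattr; a minimal round trip with that package (the path and attribute name are illustrative, and the L-variants used above differ only in operating on symlinks themselves rather than their targets):

package main

import (
    "fmt"

    "github.com/pkg/xattr"
)

func main() {
    const path = "/tmp/xattr-demo.txt" // assumes the file exists on an xattr-capable filesystem
    if err := xattr.Set(path, "user.potato", []byte("chips")); err != nil {
        panic(err) // e.g. ENOTSUP on filesystems without xattr support
    }
    v, err := xattr.Get(path, "user.potato")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(v)) // chips
}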
@@ -1,21 +0,0 @@
-//go:build openbsd || plan9
-// +build openbsd plan9
-
-// The pkg/xattr module doesn't compile for openbsd or plan9
-package local
-
-import "github.com/rclone/rclone/fs"
-
-const (
-    xattrSupported = false
-)
-
-// getXattr returns the extended attributes for an object
-func (o *Object) getXattr() (metadata fs.Metadata, err error) {
-    return nil, nil
-}
-
-// setXattr sets the metadata on the file Xattrs
-func (o *Object) setXattr(metadata fs.Metadata) (err error) {
-    return nil
-}

@@ -16,9 +16,9 @@ import (
 
 // protocol errors
 var (
-    ErrorPrematureEOF  = errors.New("premature EOF")
-    ErrorInvalidLength = errors.New("invalid length")
-    ErrorZeroTerminate = errors.New("string must end with zero")
+    ErrorPrematureEOF  = errors.New("Premature EOF")
+    ErrorInvalidLength = errors.New("Invalid length")
+    ErrorZeroTerminate = errors.New("String must end with zero")
 )
 
 // BinWriter is a binary protocol writer

@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
     w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
 }
 
-// WriteP64 writes an signed long as unsigned varint
-func (w *BinWriter) WriteP64(val int64) {
-    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
-}
-
 // WriteString writes a zero-terminated string
 func (w *BinWriter) WriteString(str string) {
     buf := []byte(str)
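Both WritePu64 and the removed WriteP64 push an int64 through uint64 into binary.PutUvarint, so a negative value wraps to a huge unsigned varint rather than failing; a standalone demonstration:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    a := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(a, uint64(int64(-1))) // -1 wraps to 2^64-1
    fmt.Println(n, a[:n])                        // 10 bytes: the maximum-length varint
}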
@@ -1,4 +1,3 @@
-// Package api provides types used by the Mail.ru API.
 package api
 
 import (

@@ -1,4 +1,3 @@
-// Package mailru provides an interface to the Mail.ru Cloud storage system.
 package mailru
 
 import (

@@ -92,12 +91,7 @@ func init() {
             Required: true,
         }, {
             Name: "pass",
-            Help: `Password.
-
-This must be an app password - rclone will not work with your normal
-password. See the Configuration section in the docs for how to make an
-app password.
-`,
+            Help:       "Password.",
             Required:   true,
             IsPassword: true,
         }, {

@@ -441,10 +435,10 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
         t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
     }
     if err == nil && !tokenIsValid(t) {
-        err = errors.New("invalid token")
+        err = errors.New("Invalid token")
     }
     if err != nil {
-        return fmt.Errorf("failed to authorize: %w", err)
+        return fmt.Errorf("Failed to authorize: %w", err)
     }
 
     if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {

@@ -586,7 +580,7 @@ func readBodyWord(res *http.Response) (word string, err error) {
         word = strings.Split(line, " ")[0]
     }
     if word == "" {
-        return "", errors.New("empty reply from dispatcher")
+        return "", errors.New("Empty reply from dispatcher")
     }
     return word, nil
 }

@@ -636,7 +630,6 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
 
 // itemToEntry converts API item to rclone directory entry
 // The dirSize return value is:
-//
 // <0 - for a file or in case of error
 // =0 - for an empty directory
 // >0 - for a non-empty directory

@@ -646,7 +639,12 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
         return nil, -1, err
     }
 
-    modTime := time.Unix(int64(item.Mtime), 0)
+    mTime := int64(item.Mtime)
+    if mTime < 0 {
+        fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
+        mTime = 0
+    }
+    modTime := time.Unix(mTime, 0)
 
     isDir, err := f.isDir(item.Kind, remote)
     if err != nil {
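The clamp on the + side above matters because time.Unix happily produces pre-1970 timestamps for negative seconds; a standalone illustration of both behaviours:

package main

import (
    "fmt"
    "time"
)

func main() {
    fmt.Println(time.Unix(-1, 0).UTC()) // 1969-12-31 23:59:59 +0000 UTC
    mTime := int64(-1)
    if mTime < 0 { // the clamp applied in the hunk above
        mTime = 0
    }
    fmt.Println(time.Unix(mTime, 0).UTC()) // 1970-01-01 00:00:00 +0000 UTC
}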
@@ -1686,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
     spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
     if err != nil {
-        return fmt.Errorf("failed to create spool file: %w", err)
+        return fmt.Errorf("Failed to create spool file: %w", err)
     }
     if o.putByHash(ctx, mrHash, src, "spool") {
         // If put by hash is successful, ignore transitive error

@@ -1725,7 +1723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return err
     }
 
-    if !bytes.Equal(fileHash, newHash) {
+    if bytes.Compare(fileHash, newHash) != 0 {
         if o.fs.opt.CheckHash {
             return mrhash.ErrorInvalidHash
         }
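bytes.Equal is simply the idiomatic spelling of bytes.Compare(a, b) == 0, and is what Go linters typically suggest; both report the same result:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    fileHash := []byte{0xde, 0xad}
    newHash := []byte{0xde, 0xad}
    fmt.Println(bytes.Equal(fileHash, newHash))        // true
    fmt.Println(bytes.Compare(fileHash, newHash) == 0) // true, but does extra ordering work
}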
@@ -1968,7 +1966,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) error {
         return fs.ErrorIsDir
     }
     if newObj.remote != o.remote {
-        return fmt.Errorf("file %q path has changed to %q", o.remote, newObj.remote)
+        return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
     }
     o.hasMetaData = true
     o.size = newObj.size

@@ -2058,7 +2056,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
     req.WritePu16(0) // revision
     req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
     req.WritePu64(o.size)
-    req.WriteP64(o.modTime.Unix())
+    req.WritePu64(o.modTime.Unix())
     req.WritePu32(0)
     req.Write(o.mrHash)
 

@@ -2264,7 +2262,7 @@ func (e *endHandler) handle(err error) error {
     }
 
     newHash := e.hasher.Sum(nil)
-    if bytes.Equal(o.mrHash, newHash) {
+    if bytes.Compare(o.mrHash, newHash) == 0 {
         return io.EOF
     }
     if o.fs.opt.CheckHash {

@@ -2279,7 +2277,7 @@ type serverPool struct {
     pool      pendingServerMap
     mu        sync.Mutex
     path      string
-    expirySec int
+    expirySec time.Duration
     fs        *Fs
 }
 

@@ -2320,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro
     })
     if err != nil || url == "" {
         closeBody(res)
-        return "", fmt.Errorf("failed to request file server: %w", err)
+        return "", fmt.Errorf("Failed to request file server: %w", err)
     }
 
     p.addServer(url, now)

@@ -2386,7 +2384,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
     p.mu.Lock()
     defer p.mu.Unlock()
 
-    expiry := now.Add(time.Duration(p.expirySec) * time.Second)
+    expiry := now.Add(p.expirySec * time.Second)
 
     expiryStr := []byte("-")
     if p.fs.ci.LogLevel >= fs.LogLevelInfo {
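Both spellings of the expiry calculation above produce the same duration; storing a plain int and converting at the use site just keeps the units explicit, since a time.Duration is already a count of nanoseconds. A standalone comparison:

package main

import (
    "fmt"
    "time"
)

func main() {
    // One side: the count lives in a time.Duration even though it is
    // really a number of seconds, so the units only work out by convention.
    var expirySec time.Duration = 240
    fmt.Println(expirySec * time.Second) // 4m0s

    // Other side: store a plain int and convert where it is used.
    expiry := 240
    fmt.Println(time.Duration(expiry) * time.Second) // 4m0s
}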
@@ -118,7 +118,7 @@ type Fs struct {
 
 // Object describes a mega object
 //
-// Will definitely have info but maybe not meta.
+// Will definitely have info but maybe not meta
 //
 // Normally rclone would just store an ID here but go-mega and mega.nz
 // expect you to build an entire tree of all the objects in memory.

@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
         }
     }
     if err != nil {
-        return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
+        return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
     }
     // i is number of directories to create (may be 0)
     // node is directory to create them from

@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
         return f._rootNode, nil
     }
 
-    // Check for preexisting root
+    // Check for pre-existing root
     absRoot := f.srv.FS.GetRoot()
     node, err := f.findDir(absRoot, f.root)
     //log.Printf("findRoot findDir %p %v", node, err)

@@ -536,7 +536,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
-// Returns the dirNode, object, leaf and error.
+// Returns the dirNode, object, leaf and error
 //
 // Used to create new objects
 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {

@@ -554,7 +554,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 // PutUnchecked uploads the object

@@ -576,7 +576,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 
 // PutUnchecked the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 // PutUnchecked uploads the object

@@ -749,9 +749,9 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -979,6 +979,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -1114,7 +1115,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -418,7 +418,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 
 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -463,9 +463,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -12,6 +12,5 @@ func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: ":memory:",
         NilObject:  (*Object)(nil),
-        QuickTestOK: true,
     })
 }
Some files were not shown because too many files have changed in this diff.