Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.52.3...fix-vfs-vd (215 commits)
Commits in this comparison, by SHA1:

394a4b0afe, 67098511db, 07b2ce4ab2, 80d2f38192, 0792f4722c, db37360a1d, 44ff766f98, bfa5715017,
e2183ad661, e2201689cf, 0f72aa8a5f, b2f4f52b64, c65ed26a7e, df5dbaf49b, 80fe1f16db, f524a4c1cc,
c61c3cddbd, 51767aee23, cd3d7e2dca, 4f7f5404ce, d4b2709fb0, e6fdc3a932, 63ebe4ca8d, 8d5bc7f28b,
50e36fb482, a1c5e76c27, 54f2587c1e, 99c293a403, fefcbf60fa, 96c2fdb445, 8301a72453, 05ddef117a,
15402e46c9, 939860eb85, 530dc77cde, 5db15cb157, 06a12f5e27, 143abe39f2, ee04732cbb, 79455cc71e,
042e5fe097, d273a9d82d, 3eded3c4ac, 20f4fda3c9, ed32a759ed, ef2d036884, 746c41f527, b0fb457746,
b9ff495483, 8506066926, 43018973ac, 7e4ba54608, 2f66355f20, 7781ea8d59, ce065614e2, fa472a340e,
279a516c53, 9ac5c6de14, 58a7faa281, 496a87a665, e4e53a2e61, 28255f1bac, 917cb4acb3, d84527a730,
7d0783aad5, 7622506fe2, ae8bbc63da, 79f5d940cf, 25662b9e05, c820576329, af601575cb, c7eae60944,
0afd5a2204, 92cb21f0f2, 0031130111, 2a3b377d34, 2aed3bf9ab, ec4e0e4d58, 696d012c05, 61ff7306ae,
0bcf4769fe, 0bfbecf9cb, 9058ec32e1, 61e4b4db42, fd7c63bc78, 49a7d08a40, 2c10ce64aa, a41a294e1d,
47b17dc1bb, 5f75444ef6, 54fda3422e, fcc2db8093, 89b7ffbd5c, ada43b0e58, 5050c33162, 4e8fda228d,
cdfb3f7194, a2dd23efd3, fa43d02874, d0de39ebcd, 2121c0fa23, a8652e2252, 81151523af, 3e82771413,
9445b12328, 4bb103ef43, 0dba7b8a46, e247811db5, 6768f999ed, ce767bc3cf, e780cda1d4, a55d882b7b,
5c5ad62208, 62a1a561cf, ce394426b0, 6606602f1e, b6b8958fb4, d8eea0e397, df9c930581, 85bcacac90,
4b4ee72796, 40611fc4fc, 7c4ba9fcb2, a1c9612d75, 33c8709439, 5e6f4ab281, 3efdf5e095, d174b97af7,
fff8822239, 7cfe3760f4, 298bd640f3, 945a37d0d2, 68afa28b27, d6a9017298, da862f82cf, f8b6727190,
2d88d24881, 62650a3eb3, 2c4f7b61c1, a3f6fe5287, 8d85c51a28, 17d5a72416, c4ce260b49, 4808958f93,
b58bb03e95, ba7fbfa8a7, 117ff1d781, 160c97da13, 0760bc09aa, 5ca82e2f05, 746a6ef8d3, 763944f673,
f4d7e41f24, f9306218f8, fb06427c69, 93bd601149, 848c5b78e1, 84d5df3c84, 63e6d9d2d1, 6a2b7b97d7,
d8d19072c5, 830ab37371, 7e48ee8758, d55053098f, 63cf0b1cdd, 5866b1b017, 8493f3939c, 095f4e9b9d,
a1382a03aa, 844b903595, a3b3e1f646, b23cf58a41, ba5eb230fb, 2ea15a72bc, b5c654a100, 6807b0e42f,
16422a6b78, b2ded6212b, 88df5927f9, 8c37262e05, 3c14a893fb, 05bc19c331, 40fe97e946, 7458d37d2a,
c4110780bf, d729004554, c0521791db, 55ad1354b6, fb61ed8506, 4c7f7582fd, a4f1f3d4e8, 973e3d6a7b,
b62d08d136, 50e31c6636, 151f03378f, 26fb9007da, 3b20335d2a, 8d55367a6a, 187ee62e3d, 10e2ec1fbb,
83999cd1d1, fef90ef0a9, 72ae5626b0, eee28d0d39, b59999dd59, e62c032184, 1635b37ff1, 8774381e2e,
cbfe7a405b, 80391fbcd4, cbf3d43561, e7bd392a69, 764b90a519, d785942ed5, 1cceadaf7c
.github/workflows/build.yml (vendored): 21 changes

```diff
@@ -197,8 +197,9 @@ jobs:
       - name: Deploy built binaries
         shell: bash
         run: |
-          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
-          make travis_beta
+          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
+          make ci_beta
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # working-directory: '$(modulePath)'
@@ -227,13 +228,13 @@ jobs:
       - name: Cross-compile rclone
         run: |
           docker pull billziss/xgo-cgofuse
-          go get -v github.com/karalabe/xgo
-          xgo \
-              -image=billziss/xgo-cgofuse \
-              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-              -tags cmount \
-              -dest build \
-              .
+          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
+          # xgo \
+          #     -image=billziss/xgo-cgofuse \
+          #     -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
+          #     -tags cmount \
+          #     -dest build \
+          #     .
           xgo \
               -image=billziss/xgo-cgofuse \
               -targets=android/*,ios/* \
@@ -247,7 +248,7 @@ jobs:

       - name: Upload artifacts
         run: |
-          make circleci_upload
+          make ci_upload
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
```
.github/workflows/build_publish_docker_image.yml (vendored, new file): 25 lines

```diff
@@ -0,0 +1,25 @@
+name: Docker beta build
+
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        with:
+          tag: beta
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
```
.github/workflows/build_publish_release_docker_image.yml (vendored, new file): 33 lines

```diff
@@ -0,0 +1,33 @@
+name: Docker release build
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Get actual patch version
+        id: actual_patch_version
+        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
+      - name: Get actual minor version
+        id: actual_minor_version
+        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
+      - name: Get actual major version
+        id: actual_major_version
+        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        with:
+          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
```
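The three "Get actual ... version" steps above derive the image tags by string surgery on `$GITHUB_REF` with `cut` and `sed`. A minimal Go sketch of the same derivation, assuming a `refs/tags/vX.Y.Z` ref (the `versionTags` helper is illustrative, not part of the workflow):

```go
package main

import (
	"fmt"
	"strings"
)

// versionTags mirrors the shell pipeline in the workflow: take the
// third /-separated field of GITHUB_REF (e.g. "refs/tags/v1.52.1"),
// strip every "v", then derive the minor and major tags from it.
func versionTags(githubRef string) (patch, minor, major string) {
	parts := strings.SplitN(githubRef, "/", 3) // ["refs", "tags", "v1.52.1"]
	patch = strings.ReplaceAll(parts[2], "v", "")
	fields := strings.Split(patch, ".")
	minor = strings.Join(fields[:2], ".")
	major = fields[0]
	return patch, minor, major
}

func main() {
	patch, minor, major := versionTags("refs/tags/v1.52.1")
	fmt.Println(patch, minor, major) // 1.52.1 1.52 1
}
```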
````diff
@@ -79,8 +79,15 @@ request](https://help.github.com/articles/creating-a-pull-request/).

 Your patch will get reviewed and you might get asked to fix some stuff.

-If so, then make the changes in the same branch, squash the commits,
-rebase it to master then push it to GitHub with `--force`.
+If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
+```
+git log # See how many commits you want to squash
+git reset --soft HEAD~2 # This squashes the 2 latest commits together.
+git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
+git commit # Add a new commit message.
+git push --force # Push the squashed commit to your GitHub repo.
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
+```

 ## CI for your fork ##

````
```diff
@@ -3,7 +3,6 @@ FROM golang AS builder
 COPY . /go/src/github.com/rclone/rclone/
 WORKDIR /go/src/github.com/rclone/rclone/

-RUN make quicktest
 RUN \
   CGO_ENABLED=0 \
   make
@@ -12,7 +11,8 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse
+RUN apk --no-cache add ca-certificates fuse tzdata && \
+    echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
```
Makefile: 36 changes

```diff
@@ -1,6 +1,6 @@
 SHELL = bash
 # Branch we are working on
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
+BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
 # Tag of the current commit, if any. If this is not "" then we are building a release
 RELEASE_TAG := $(shell git tag -l --points-at HEAD)
 # Version of last release (may not be on this branch)
@@ -61,6 +61,10 @@ vars:
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"

+btest:
+	@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
+	@echo "Copied markdown of beta release to clip board"
+
 version:
 	@echo '$(TAG)'

@@ -86,11 +90,15 @@ check: rclone
 build_dep:
 	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

-# Get the release dependencies
-release_dep:
+# Get the release dependencies we only install on linux
+release_dep_linux:
 	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
 	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'

+# Get the release dependencies we only install on Windows
+release_dep_windows:
+	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
+
 # Update dependencies
 update:
 	GO111MODULE=on go get -u ./...
@@ -105,16 +113,16 @@ tidy:
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

 rclone.1: MANUAL.md
-	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
+	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1

 MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
 	./bin/make_manual.py

 MANUAL.html: MANUAL.md
-	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
+	pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html

 MANUAL.txt: MANUAL.md
-	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
+	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
@@ -183,14 +191,7 @@ log_since_last_release:
 compile_all:
 	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)

-appveyor_upload:
-	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
-endif
-	@echo Beta release ready at $(BETA_URL)
-
-circleci_upload:
+ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
@@ -200,10 +201,7 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds

-travis_beta:
-ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
-endif
+ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
 	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -212,7 +210,7 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)

-# Fetch the binary builds from travis and appveyor
+# Fetch the binary builds from GitHub actions
 fetch_binaries:
 	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
```
RELEASE.md: 23 changes

````diff
@@ -63,14 +63,14 @@ If rclone needs a point release due to some horrendous bug:
 First make the release branch. If this is a second point release then
 this will be done already.

-* BASE_TAG=v1.XX # eg v1.49
-* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
-* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
-* git branch ${BASE_TAG} ${BASE_TAG}-fixes
+* BASE_TAG=v1.XX # eg v1.52
+* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
+* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
+* git branch ${BASE_TAG} ${BASE_TAG}-stable

 Now

-* git co ${BASE_TAG}-fixes
+* git co ${BASE_TAG}-stable
 * git cherry-pick any fixes
 * Test (see above)
 * make NEXT_VERSION=${NEW_TAG} tag
@@ -79,7 +79,7 @@ Now
 * git commit -a -v -m "Version ${NEW_TAG}"
 * git tag -d ${NEW_TAG}
 * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-* git push --tags -u origin ${BASE_TAG}-fixes
+* git push --tags -u origin ${BASE_TAG}-stable
 * Wait for builds to complete
 * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
 * make TAG=${NEW_TAG} tarball
@@ -92,20 +92,21 @@ Now
 * git co master
 * make VERSION=${NEW_TAG} startdev
 * # cherry pick the changes to the changelog and VERSION
-* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
+* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
 * git commit --amend
 * git push
 * Announce!

 ## Making a manual build of docker

-The rclone docker image should autobuild on docker hub. If it doesn't
+The rclone docker image should autobuild via GitHub actions. If it doesn't
 or needs to be updated then rebuild like this.

 ```
-docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.49.1
-docker push rclone/rclone:1.49
+docker pull golang
+docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
+docker push rclone/rclone:1.52.0
+docker push rclone/rclone:1.52
 docker push rclone/rclone:1
 docker push rclone/rclone:latest
 ```
````
```diff
@@ -514,10 +514,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(ctx, false)
-	if err != nil {
-		return nil, err
-	}
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
@@ -665,7 +661,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		return nil, err
 	}
 	// If not create it
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
 		return nil, err
 	}
@@ -696,13 +692,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	err := f.dirCache.FindRoot(ctx, true)
-	if err != nil {
-		return err
-	}
-	if dir != "" {
-		_, err = f.dirCache.FindDir(ctx, dir, true)
-	}
+	_, err := f.dirCache.FindDir(ctx, dir, true)
 	return err
 }

@@ -724,10 +714,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// create the destination directory if necessary
-	err := f.dirCache.FindRoot(ctx, true)
-	if err != nil {
-		return nil, err
-	}
 	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
 	if err != nil {
 		return nil, err
@@ -797,54 +783,24 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.New("can't move root directory")
 	}

-	// find the root src directory
-	err = srcFs.dirCache.FindRoot(ctx, false)
-	if err != nil {
-		return err
-	}
-
-	// find the root dst directory
-	if dstRemote != "" {
-		err = f.dirCache.FindRoot(ctx, true)
-		if err != nil {
-			return err
-		}
-	} else {
-		if f.dirCache.FoundRoot() {
-			return fs.ErrorDirExists
-		}
-	}
-
 	// Find ID of dst parent, creating subdirs if necessary
-	findPath := dstRemote
-	if dstRemote == "" {
-		findPath = f.root
-	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
 	if err != nil {
 		return err
 	}

 	// Check destination does not exist
-	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
-		if err == fs.ErrorDirNotFound {
-			// OK
-		} else if err != nil {
-			return err
-		} else {
-			return fs.ErrorDirExists
-		}
+	_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+	if err == fs.ErrorDirNotFound {
+		// OK
+	} else if err != nil {
+		return err
+	} else {
+		return fs.ErrorDirExists
 	}

 	// Find ID of src parent
-	findPath = srcRemote
-	var srcDirectoryID string
-	if srcRemote == "" {
-		srcDirectoryID, err = srcFs.dirCache.RootParentID()
-	} else {
-		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
-	}
+	_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
 	if err != nil {
 		return err
 	}
@@ -890,10 +846,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(ctx, false)
-	if err != nil {
-		return err
-	}
 	rootID, err := dc.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
@@ -1037,7 +989,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.info != nil {
 		return nil
 	}
-	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
+	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return fs.ErrorObjectNotFound
```
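The pattern in every hunk above is the same: the explicit `FindRoot` preamble disappears because `FindPath` and `FindDir` now resolve the root themselves. A toy sketch of the contract the callers rely on (illustrative only; rclone's real implementation lives in lib/dircache and caches API lookups):

```go
package main

import (
	"fmt"
	"strings"
)

// toyFindPath mimics the post-refactor contract: given a remote path it
// returns the leaf name and an ID for the parent directory, with no
// separate root-finding step. The create flag would make missing
// intermediate directories; this toy version only splits the path.
func toyFindPath(path string, create bool) (leaf, directoryID string) {
	i := strings.LastIndex(path, "/")
	if i < 0 {
		return path, "root"
	}
	return path[i+1:], "id-of:" + path[:i]
}

func main() {
	leaf, dirID := toyFindPath("a/b/c.txt", true)
	fmt.Println(leaf, dirID) // c.txt id-of:a/b
}
```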
```diff
@@ -1,6 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system

-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.13

 package azureblob

@@ -1,4 +1,4 @@
-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.13

 package azureblob

@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface

-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.13

 package azureblob

@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// +build plan9 solaris
+// +build plan9 solaris !go1.13

 package azureblob
```
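For readers unfamiliar with build constraints: in a `// +build` line, comma-separated terms are ANDed, space-separated terms are ORed, and `!` negates, so the two constraints above are exact complements and exactly one of the files compiles on any platform and Go version. A hypothetical stub file illustrating the same gating:

```go
// A hypothetical stub showing the constraint syntax: this header
// selects plan9 OR solaris OR any Go before 1.13, the exact
// complement of !plan9,!solaris,go1.13 used in the real files.

// +build plan9 solaris !go1.13

// Package azureblob is stubbed out on unsupported platforms so that
// "go build ./..." does not fail with "no buildable Go source files".
package azureblob
```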
```diff
@@ -337,3 +337,11 @@ type CopyFileRequest struct {
 	Info         map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
 	DestBucketID string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
 }
+
+// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
+type CopyPartRequest struct {
+	SourceID    string `json:"sourceFileId"`    // The ID of the source file being copied.
+	LargeFileID string `json:"largeFileId"`     // The ID of the large file the part will belong to, as returned by b2_start_large_file.
+	PartNumber  int64  `json:"partNumber"`      // Which part this is (starting from 1)
+	Range       string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
+}
```
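The `Range` field takes a standard inclusive byte range. A small sketch (not part of the patch; `partRange` is an illustrative name) of how a part number plus chunk size map onto it, matching the `copyChunk` arithmetic further down:

```go
package main

import "fmt"

// partRange derives the inclusive b2_copy_part byte range for a given
// part. Parts are numbered from 1 and all but the last are chunkSize
// bytes long.
func partRange(part, chunkSize, remaining int64) string {
	offset := (part - 1) * chunkSize
	size := chunkSize
	if remaining < chunkSize {
		size = remaining // short final part
	}
	return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)
}

func main() {
	fmt.Println(partRange(1, 100, 250)) // bytes=0-99
	fmt.Println(partRange(3, 100, 50))  // bytes=200-249
}
```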
backend/b2/b2.go: 206 changes
```diff
@@ -33,6 +33,7 @@ import (
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
 )

@@ -54,6 +55,9 @@ const (
 	minChunkSize        = 5 * fs.MebiByte
 	defaultChunkSize    = 96 * fs.MebiByte
 	defaultUploadCutoff = 200 * fs.MebiByte
+	largeFileCopyCutoff = 4 * fs.GibiByte          // 5E9 is the max
+	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
+	memoryPoolUseMmap   = false
 )

 // Globals
@@ -113,6 +117,16 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".
 This value should be set no larger than 4.657GiB (== 5GB).`,
 			Default:  defaultUploadCutoff,
 			Advanced: true,
+		}, {
+			Name: "copy_cutoff",
+			Help: `Cutoff for switching to multipart copy
+
+Any files larger than this that need to be server side copied will be
+copied in chunks of this size.
+
+The minimum is 0 and the maximum is 4.6GB.`,
+			Default:  largeFileCopyCutoff,
+			Advanced: true,
 		}, {
 			Name: "chunk_size",
 			Help: `Upload chunk size. Must fit in memory.
@@ -150,6 +164,18 @@ The duration before the download authorization token will expire.
 The minimum value is 1 second. The maximum value is one week.`,
 			Default:  fs.Duration(7 * 24 * time.Hour),
 			Advanced: true,
+		}, {
+			Name:     "memory_pool_flush_time",
+			Default:  memoryPoolFlushTime,
+			Advanced: true,
+			Help: `How often internal memory buffer pools will be flushed.
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
+		}, {
+			Name:     "memory_pool_use_mmap",
+			Default:  memoryPoolUseMmap,
+			Advanced: true,
+			Help:     `Whether to use mmap buffers in internal memory pool.`,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -173,10 +199,13 @@ type Options struct {
 	Versions                      bool          `config:"versions"`
 	HardDelete                    bool          `config:"hard_delete"`
 	UploadCutoff                  fs.SizeSuffix `config:"upload_cutoff"`
+	CopyCutoff                    fs.SizeSuffix `config:"copy_cutoff"`
 	ChunkSize                     fs.SizeSuffix `config:"chunk_size"`
 	DisableCheckSum               bool          `config:"disable_checksum"`
 	DownloadURL                   string        `config:"download_url"`
 	DownloadAuthorizationDuration fs.Duration   `config:"download_auth_duration"`
+	MemoryPoolFlushTime           fs.Duration   `config:"memory_pool_flush_time"`
+	MemoryPoolUseMmap             bool          `config:"memory_pool_use_mmap"`
 	Enc                           encoder.MultiEncoder `config:"encoding"`
 }

@@ -199,7 +228,8 @@ type Fs struct {
 	uploads      map[string][]*api.GetUploadURLResponse // Upload URLs by buckedID
 	authMu       sync.Mutex                             // lock for authorizing the account
 	pacer        *fs.Pacer                              // To pace and retry the API calls
-	bufferTokens chan []byte                            // control concurrency of multipart uploads
+	uploadToken  *pacer.TokenDispenser                  // control concurrency
+	pool         *pool.Pool                             // memory pool
 }

 // Object describes a b2 object
@@ -335,7 +365,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 	err = checkUploadChunkSize(cs)
 	if err == nil {
 		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
-		f.fillBufferTokens() // reset the buffer tokens
 	}
 	return
 }
@@ -396,6 +425,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		_bucketType: make(map[string]string, 1),
 		uploads:     make(map[string][]*api.GetUploadURLResponse),
 		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pool: pool.New(
+			time.Duration(opt.MemoryPoolFlushTime),
+			int(opt.ChunkSize),
+			fs.Config.Transfers,
+			opt.MemoryPoolUseMmap,
+		),
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
@@ -410,7 +446,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		f.srv.SetHeader(testModeHeader, testMode)
 		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
 	}
-	f.fillBufferTokens()
 	err = f.authorizeAccount(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to authorize account")
```
```diff
@@ -533,32 +568,25 @@ func (f *Fs) clearUploadURL(bucketID string) {
 	f.uploadMu.Unlock()
 }

-// Fill up (or reset) the buffer tokens
-func (f *Fs) fillBufferTokens() {
-	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
-	for i := 0; i < fs.Config.Transfers; i++ {
-		f.bufferTokens <- nil
-	}
-}
-
-// getUploadBlock gets a block from the pool of size chunkSize
-func (f *Fs) getUploadBlock() []byte {
-	buf := <-f.bufferTokens
-	if buf == nil {
-		buf = make([]byte, f.opt.ChunkSize)
-	}
-	// fs.Debugf(f, "Getting upload block %p", buf)
-	return buf
-}
-
-// putUploadBlock returns a block to the pool of size chunkSize
-func (f *Fs) putUploadBlock(buf []byte) {
-	buf = buf[:cap(buf)]
-	if len(buf) != int(f.opt.ChunkSize) {
-		panic("bad blocksize returned to pool")
-	}
-	// fs.Debugf(f, "Returning upload block %p", buf)
-	f.bufferTokens <- buf
+// getBuf gets a buffer of f.opt.ChunkSize and an upload token
+//
+// If noBuf is set then it just gets an upload token
+func (f *Fs) getBuf(noBuf bool) (buf []byte) {
+	f.uploadToken.Get()
+	if !noBuf {
+		buf = f.pool.Get()
+	}
+	return buf
+}
+
+// putBuf returns a buffer to the memory pool and an upload token
+//
+// If noBuf is set then it just returns the upload token
+func (f *Fs) putBuf(buf []byte, noBuf bool) {
+	if !noBuf {
+		f.pool.Put(buf)
+	}
+	f.uploadToken.Put()
 }

 // Return an Object from a path
```
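The refactor splits two concerns the old `bufferTokens` channel fused together: a token dispenser bounds how many buffers are in flight, and a pool recycles them. A self-contained sketch of that pairing, using a plain channel and `sync.Pool` instead of rclone's lib/pacer and lib/pool (which add timed flushing and optional mmap on top of the same idea):

```go
package main

import "sync"

// bufPool bounds concurrency with a token channel and recycles
// chunk-sized byte slices with a sync.Pool.
type bufPool struct {
	tokens chan struct{}
	pool   sync.Pool
}

func newBufPool(concurrency, chunkSize int) *bufPool {
	return &bufPool{
		tokens: make(chan struct{}, concurrency),
		pool: sync.Pool{
			New: func() interface{} { return make([]byte, chunkSize) },
		},
	}
}

// get takes a token (blocking once `concurrency` buffers are in
// flight) and, unless noBuf is set, hands out a chunk-sized buffer.
func (p *bufPool) get(noBuf bool) (buf []byte) {
	p.tokens <- struct{}{}
	if !noBuf {
		buf = p.pool.Get().([]byte)
	}
	return buf
}

// put returns the buffer (if any) to the pool and releases the token.
func (p *bufPool) put(buf []byte, noBuf bool) {
	if !noBuf {
		p.pool.Put(buf)
	}
	<-p.tokens
}
```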
```diff
@@ -1205,6 +1233,63 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
 }

+// copy does a server side copy from dstObj <- srcObj
+//
+// If newInfo is nil then the metadata will be copied otherwise it
+// will be replaced with newInfo
+func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
+	if srcObj.size >= int64(f.opt.CopyCutoff) {
+		if newInfo == nil {
+			newInfo, err = srcObj.getMetaData(ctx)
+			if err != nil {
+				return err
+			}
+		}
+		up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
+		if err != nil {
+			return err
+		}
+		return up.Upload(ctx)
+	}
+
+	dstBucket, dstPath := dstObj.split()
+	err = f.makeBucket(ctx, dstBucket)
+	if err != nil {
+		return err
+	}
+
+	destBucketID, err := f.getBucketID(ctx, dstBucket)
+	if err != nil {
+		return err
+	}
+
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/b2_copy_file",
+	}
+	var request = api.CopyFileRequest{
+		SourceID:     srcObj.id,
+		Name:         f.opt.Enc.FromStandardPath(dstPath),
+		DestBucketID: destBucketID,
+	}
+	if newInfo == nil {
+		request.MetadataDirective = "COPY"
+	} else {
+		request.MetadataDirective = "REPLACE"
+		request.ContentType = newInfo.ContentType
+		request.Info = newInfo.Info
+	}
+	var response api.FileInfo
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
+		return f.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return err
+	}
+	return dstObj.decodeMetaDataFileInfo(&response)
+}
+
 // Copy src to this remote using server side copy operations.
 //
 // This is stored with the remote path given
```
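So a server side copy below the copy cutoff stays a single `b2_copy_file` call, while anything at or above it becomes a multipart copy in cutoff-sized chunks. A sketch of the resulting part count (illustrative values, not rclone defaults):

```go
package main

import "fmt"

// copyParts sketches the arithmetic behind the new copy path: files at
// or above the cutoff are copied server side in cutoff-sized chunks.
func copyParts(size, cutoff int64) int64 {
	if size < cutoff {
		return 1 // single b2_copy_file call
	}
	parts := size / cutoff
	if size%cutoff != 0 {
		parts++ // short final part
	}
	return parts
}

func main() {
	const gib = 1 << 30
	fmt.Println(copyParts(10*gib, 4*gib)) // 3 parts: 4 GiB + 4 GiB + 2 GiB
}
```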
```diff
@@ -1215,47 +1300,21 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
-	if err != nil {
-		return nil, err
-	}
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	destBucketID, err := f.getBucketID(ctx, dstBucket)
-	if err != nil {
-		return nil, err
-	}
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_copy_file",
-	}
-	var request = api.CopyFileRequest{
-		SourceID:          srcObj.id,
-		Name:              f.opt.Enc.FromStandardPath(dstPath),
-		MetadataDirective: "COPY",
-		DestBucketID:      destBucketID,
-	}
-	var response api.FileInfo
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-	o := &Object{
+	// Temporary Object under construction
+	dstObj := &Object{
 		fs:     f,
 		remote: remote,
 	}
-	err = o.decodeMetaDataFileInfo(&response)
+	err := f.copy(ctx, dstObj, srcObj, nil)
 	if err != nil {
 		return nil, err
 	}
-	return o, nil
+	return dstObj, nil
 }

 // Hashes returns the supported hash sets.
@@ -1298,7 +1357,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 }

 // PublicLink returns a link for downloading without account
-func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
 	bucket, bucketPath := f.split(remote)
 	var RootURL string
 	if f.opt.DownloadURL == "" {
@@ -1526,28 +1585,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	if err != nil {
 		return err
 	}
-	_, bucketPath := o.split()
 	info.Info[timeKey] = timeString(modTime)
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_copy_file",
-	}
-	var request = api.CopyFileRequest{
-		SourceID:          o.id,
-		Name:              o.fs.opt.Enc.FromStandardPath(bucketPath), // copy to same name
-		MetadataDirective: "REPLACE",
-		ContentType:       info.ContentType,
-		Info:              info.Info,
-	}
-	var response api.FileInfo
-	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-		return o.fs.shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return err
-	}
-	return o.decodeMetaDataFileInfo(&response)
+
+	// Copy to the same name, overwriting the metadata only
+	return o.fs.copy(ctx, o, o, info)
 }

 // Storable returns if this object is storable
@@ -1723,7 +1764,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	if size == -1 {
 		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
-		buf := o.fs.getUploadBlock()
+		buf := o.fs.getBuf(false)
+
 		n, err := io.ReadFull(in, buf)
 		if err == nil {
 			bufReader := bufio.NewReader(in)
@@ -1733,22 +1775,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 		if err == nil {
 			fs.Debugf(o, "File is big enough for chunked streaming")
-			up, err := o.fs.newLargeUpload(ctx, o, in, src)
+			up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
 			if err != nil {
-				o.fs.putUploadBlock(buf)
+				o.fs.putBuf(buf, false)
 				return err
 			}
+			// NB Stream returns the buffer and token
 			return up.Stream(ctx, buf)
 		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
 			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
-			defer o.fs.putUploadBlock(buf)
+			defer o.fs.putBuf(buf, false)
 			size = int64(n)
 			in = bytes.NewReader(buf[:n])
 		} else {
+			o.fs.putBuf(buf, false)
 			return err
 		}
 	} else if size > int64(o.fs.opt.UploadCutoff) {
-		up, err := o.fs.newLargeUpload(ctx, o, in, src)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
 		if err != nil {
 			return err
 		}
```
```diff
@@ -20,7 +20,9 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/sync/errgroup"
 )

 type hashAppendingReader struct {
```
```diff
@@ -68,20 +70,26 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {

 // largeUpload is used to control the upload of large files which need chunking
 type largeUpload struct {
-	f        *Fs                             // parent Fs
-	o        *Object                         // object being uploaded
-	in       io.Reader                       // read the data from here
-	wrap     accounting.WrapFn               // account parts being transferred
-	id       string                          // ID of the file being uploaded
-	size     int64                           // total size
-	parts    int64                           // calculated number of parts, if known
-	sha1s    []string                        // slice of SHA1s for each part
-	uploadMu sync.Mutex                      // lock for upload variable
-	uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls
+	f         *Fs                             // parent Fs
+	o         *Object                         // object being uploaded
+	doCopy    bool                            // doing copy rather than upload
+	what      string                          // text name of operation for logs
+	in        io.Reader                       // read the data from here
+	wrap      accounting.WrapFn               // account parts being transferred
+	id        string                          // ID of the file being uploaded
+	size      int64                           // total size
+	parts     int64                           // calculated number of parts, if known
+	sha1s     []string                        // slice of SHA1s for each part
+	uploadMu  sync.Mutex                      // lock for upload variable
+	uploads   []*api.GetUploadPartURLResponse // result of get upload URL calls
+	chunkSize int64                           // chunk size to use
+	src       *Object                         // if copying, object we are reading from
 }

 // newLargeUpload starts an upload of object o from in with metadata in src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
+//
+// If newInfo is set then metadata from that will be used instead of reading it from src
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
 	remote := o.remote
 	size := src.Size()
 	parts := int64(0)
@@ -89,8 +97,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 	if size == -1 {
 		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 	} else {
-		parts = size / int64(o.fs.opt.ChunkSize)
-		if size%int64(o.fs.opt.ChunkSize) != 0 {
+		parts = size / int64(chunkSize)
+		if size%int64(chunkSize) != 0 {
 			parts++
 		}
 		if parts > maxParts {
@@ -99,7 +107,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		sha1SliceSize = parts
 	}

-	modTime := src.ModTime(ctx)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
@@ -110,18 +117,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
-		BucketID:    bucketID,
-		Name:        f.opt.Enc.FromStandardPath(bucketPath),
-		ContentType: fs.MimeType(ctx, src),
-		Info: map[string]string{
-			timeKey: timeString(modTime),
-		},
+		BucketID: bucketID,
+		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
-	// Set the SHA1 if known
-	if !o.fs.opt.DisableCheckSum {
-		if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
-			request.Info[sha1Key] = calculatedSha1
+	if newInfo == nil {
+		modTime := src.ModTime(ctx)
+		request.ContentType = fs.MimeType(ctx, src)
+		request.Info = map[string]string{
+			timeKey: timeString(modTime),
+		}
+		// Set the SHA1 if known
+		if !o.fs.opt.DisableCheckSum || doCopy {
+			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
+				request.Info[sha1Key] = calculatedSha1
+			}
 		}
+	} else {
+		request.ContentType = newInfo.ContentType
+		request.Info = newInfo.Info
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
@@ -131,18 +144,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 	if err != nil {
 		return nil, err
 	}
-	// unwrap the accounting from the input, we use wrap to put it
-	// back on after the buffering
-	in, wrap := accounting.UnWrap(in)
 	up = &largeUpload{
-		f:     f,
-		o:     o,
-		in:    in,
-		wrap:  wrap,
-		id:    response.ID,
-		size:  size,
-		parts: parts,
-		sha1s: make([]string, sha1SliceSize),
+		f:         f,
+		o:         o,
+		doCopy:    doCopy,
+		what:      "upload",
+		id:        response.ID,
+		size:      size,
+		parts:     parts,
+		sha1s:     make([]string, sha1SliceSize),
+		chunkSize: int64(chunkSize),
+	}
+	// unwrap the accounting from the input, we use wrap to put it
+	// back on after the buffering
+	if doCopy {
+		up.what = "copy"
+		up.src = src.(*Object)
+	} else {
+		up.in, up.wrap = accounting.UnWrap(in)
 	}
 	return up, nil
 }
```
```diff
@@ -256,9 +275,41 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 	return err
 }

+// Copy a chunk
+func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
+	err := up.f.pacer.Call(func() (bool, error) {
+		fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
+		opts := rest.Opts{
+			Method: "POST",
+			Path:   "/b2_copy_part",
+		}
+		offset := (part - 1) * up.chunkSize // where we are in the source file
+		var request = api.CopyPartRequest{
+			SourceID:    up.src.id,
+			LargeFileID: up.id,
+			PartNumber:  part,
+			Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
+		}
+		var response api.UploadPartResponse
+		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
+		retry, err := up.f.shouldRetry(ctx, resp, err)
+		if err != nil {
+			fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
+		}
+		up.sha1s[part-1] = response.SHA1
+		return retry, err
+	})
+	if err != nil {
+		fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
+	} else {
+		fs.Debugf(up.o, "Done copying chunk %d", part)
+	}
+	return err
+}
+
 // finish closes off the large upload
 func (up *largeUpload) finish(ctx context.Context) error {
-	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
+	fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_finish_large_file",
@@ -280,6 +331,7 @@ func (up *largeUpload) finish(ctx context.Context) error {

 // cancel aborts the large upload
 func (up *largeUpload) cancel(ctx context.Context) error {
+	fs.Debugf(up.o, "Cancelling large file %s", up.what)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_cancel_large_file",
```
```diff
@@ -292,139 +344,139 @@ func (up *largeUpload) cancel(ctx context.Context) error {
 		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
 		return up.f.shouldRetry(ctx, resp, err)
 	})
+	if err != nil {
+		fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
+	}
 	return err
 }

-func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
-	wg.Add(1)
-	go func(part int64, buf []byte) {
-		defer wg.Done()
-		defer up.f.putUploadBlock(buf)
-		err := up.transferChunk(ctx, part, buf)
-		if err != nil {
-			select {
-			case errs <- err:
-			default:
-			}
-		}
-	}(part, buf)
-}
-
-func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
-	if err == nil {
-		select {
-		case err = <-errs:
-		default:
-		}
-	}
-	if err != nil {
-		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
-		cancelErr := up.cancel(ctx)
-		if cancelErr != nil {
-			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
-		}
-		return err
-	}
-	return up.finish(ctx)
-}
-
 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
+//
+// Note that initialUploadBlock must be returned to f.putBuf()
 func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
-	errs := make(chan error, 1)
-	hasMoreParts := true
-	var wg sync.WaitGroup
-
-	// Transfer initial chunk
+	var (
+		g, gCtx      = errgroup.WithContext(ctx)
+		hasMoreParts = true
+	)
 	up.size = int64(len(initialUploadBlock))
-	up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
-
-outer:
-	for part := int64(2); hasMoreParts; part++ {
-		// Check any errors
-		select {
-		case err = <-errs:
-			break outer
-		default:
-		}
-
-		// Get a block of memory
-		buf := up.f.getUploadBlock()
-
-		// Read the chunk
-		var n int
-		n, err = io.ReadFull(up.in, buf)
-		if err == io.ErrUnexpectedEOF {
-			fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-			buf = buf[:n]
-			hasMoreParts = false
-			err = nil
-		} else if err == io.EOF {
-			fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-			up.f.putUploadBlock(buf)
-			err = nil
-			break outer
-		} else if err != nil {
-			// other kinds of errors indicate failure
-			up.f.putUploadBlock(buf)
-			break outer
-		}
-
-		// Keep stats up to date
-		up.parts = part
-		up.size += int64(n)
-		if part > maxParts {
-			err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-			break outer
-		}
-
-		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
+	g.Go(func() error {
+		for part := int64(1); hasMoreParts; part++ {
+			// Get a block of memory from the pool and token which limits concurrency.
+			var buf []byte
+			if part == 1 {
+				buf = initialUploadBlock
+			} else {
+				buf = up.f.getBuf(false)
+			}
+
+			// Fail fast, in case an errgroup managed function returns an error
+			// gCtx is cancelled. There is no point in uploading all the other parts.
+			if gCtx.Err() != nil {
+				up.f.putBuf(buf, false)
+				return nil
+			}
+
+			// Read the chunk
+			var n int
+			if part == 1 {
+				n = len(buf)
+			} else {
+				n, err = io.ReadFull(up.in, buf)
+				if err == io.ErrUnexpectedEOF {
+					fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+					buf = buf[:n]
+					hasMoreParts = false
+				} else if err == io.EOF {
+					fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+					up.f.putBuf(buf, false)
+					return nil
+				} else if err != nil {
+					// other kinds of errors indicate failure
+					up.f.putBuf(buf, false)
+					return err
+				}
+			}
+
+			// Keep stats up to date
+			up.parts = part
+			up.size += int64(n)
+			if part > maxParts {
+				up.f.putBuf(buf, false)
+				return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, false)
+				return up.transferChunk(gCtx, part, buf)
+			})
+		}
+		return nil
+	})
+	err = g.Wait()
+	if err != nil {
+		return err
 	}
-	wg.Wait()
 	up.sha1s = up.sha1s[:up.parts]
-
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finish(ctx)
 }

 // Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) error {
-	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
-	remaining := up.size
-	errs := make(chan error, 1)
-	var wg sync.WaitGroup
-	var err error
-outer:
-	for part := int64(1); part <= up.parts; part++ {
-		// Check any errors
-		select {
-		case err = <-errs:
-			break outer
-		default:
-		}
-
-		reqSize := remaining
-		if reqSize >= int64(up.f.opt.ChunkSize) {
-			reqSize = int64(up.f.opt.ChunkSize)
-		}
-
-		// Get a block of memory
-		buf := up.f.getUploadBlock()[:reqSize]
-
-		// Read the chunk
-		_, err = io.ReadFull(up.in, buf)
-		if err != nil {
-			up.f.putUploadBlock(buf)
-			break outer
-		}
-
-		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
-		remaining -= reqSize
+func (up *largeUpload) Upload(ctx context.Context) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
+	var (
+		g, gCtx   = errgroup.WithContext(ctx)
+		remaining = up.size
+	)
+	g.Go(func() error {
+		for part := int64(1); part <= up.parts; part++ {
+			// Get a block of memory from the pool and token which limits concurrency.
+			buf := up.f.getBuf(up.doCopy)
+
+			// Fail fast, in case an errgroup managed function returns an error
+			// gCtx is cancelled. There is no point in uploading all the other parts.
+			if gCtx.Err() != nil {
+				up.f.putBuf(buf, up.doCopy)
+				return nil
+			}
+
+			reqSize := remaining
+			if reqSize >= up.chunkSize {
+				reqSize = up.chunkSize
+			}
+
+			if !up.doCopy {
+				// Read the chunk
+				buf = buf[:reqSize]
+				_, err = io.ReadFull(up.in, buf)
+				if err != nil {
+					up.f.putBuf(buf, up.doCopy)
+					return err
+				}
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, up.doCopy)
+				if !up.doCopy {
+					err = up.transferChunk(gCtx, part, buf)
+				} else {
+					err = up.copyChunk(gCtx, part, reqSize)
+				}
+				return err
+			})
+			remaining -= reqSize
+		}
+		return nil
+	})
+	err = g.Wait()
+	if err != nil {
+		return err
 	}
-	wg.Wait()
-
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finish(ctx)
 }
```
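The shape shared by the new `Stream` and `Upload` is worth isolating: one errgroup goroutine produces parts, each part is transferred by a nested `g.Go`, tokens bound concurrency, and the first error cancels `gCtx` so the producer stops dispatching. A self-contained sketch of that pattern, assuming nothing from rclone (names like `process` are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// process shows the shape of the refactored Upload loop: a producer
// goroutine dispatches one worker per part, a token channel bounds
// concurrency, and the first error cancels gCtx so later iterations
// stop dispatching instead of doing work that will be thrown away.
func process(ctx context.Context, parts, concurrency int, work func(ctx context.Context, part int) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	tokens := make(chan struct{}, concurrency)
	g.Go(func() error {
		for part := 1; part <= parts; part++ {
			tokens <- struct{}{} // acquire, like getBuf
			if gCtx.Err() != nil {
				<-tokens
				return nil // fail fast: a worker already failed
			}
			part := part // capture for the closure
			g.Go(func() error {
				defer func() { <-tokens }() // release, like putBuf
				return work(gCtx, part)
			})
		}
		return nil
	})
	return g.Wait()
}

func main() {
	err := process(context.Background(), 10, 4, func(ctx context.Context, part int) error {
		fmt.Println("transferring part", part)
		return nil
	})
	fmt.Println("err:", err)
}
```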
```diff
@@ -26,6 +26,7 @@ import (
 	"time"

 	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/jwtutil"

 	"github.com/youmark/pkcs8"
@@ -112,7 +113,7 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "box_config_file",
-			Help: "Box App config.json location\nLeave blank normally.",
+			Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
@@ -153,6 +154,7 @@ func init() {
 }

 func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
+	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
 		log.Fatalf("Failed to configure token: %v", err)
@@ -327,7 +329,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 // readMetaDataForPath reads the metadata from the path
 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
 	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
+	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return nil, fs.ErrorObjectNotFound
```
```diff
@@ -615,10 +617,6 @@ OUTER:
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(ctx, false)
-	if err != nil {
-		return nil, err
-	}
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
@@ -659,7 +657,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Used to create new objects
 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
+	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
 		return
 	}
@@ -715,13 +713,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	err := f.dirCache.FindRoot(ctx, true)
-	if err != nil {
-		return err
-	}
-	if dir != "" {
-		_, err = f.dirCache.FindDir(ctx, dir, true)
-	}
+	_, err := f.dirCache.FindDir(ctx, dir, true)
 	return err
 }

@@ -746,10 +738,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(ctx, false)
-	if err != nil {
-		return err
-	}
 	rootID, err := dc.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
```
```diff
@@ -956,64 +944,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
 	}
-	srcPath := path.Join(srcFs.root, srcRemote)
-	dstPath := path.Join(f.root, dstRemote)
-
-	// Refuse to move to or from the root
-	if srcPath == "" || dstPath == "" {
-		fs.Debugf(src, "DirMove error: Can't move root")
-		return errors.New("can't move root directory")
-	}
-
-	// find the root src directory
-	err := srcFs.dirCache.FindRoot(ctx, false)
-	if err != nil {
-		return err
-	}
-
-	// find the root dst directory
-	if dstRemote != "" {
-		err = f.dirCache.FindRoot(ctx, true)
-		if err != nil {
-			return err
-		}
-	} else {
-		if f.dirCache.FoundRoot() {
-			return fs.ErrorDirExists
-		}
-	}
-
-	// Find ID of dst parent, creating subdirs if necessary
-	var leaf, directoryID string
-	findPath := dstRemote
-	if dstRemote == "" {
-		findPath = f.root
-	}
-	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
-	if err != nil {
-		return err
-	}
-
-	// Check destination does not exist
-	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
-		if err == fs.ErrorDirNotFound {
-			// OK
-		} else if err != nil {
-			return err
-		} else {
-			return fs.ErrorDirExists
-		}
-	}
-
-	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
 	if err != nil {
 		return err
 	}

 	// Do the move
-	_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
+	_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
 	if err != nil {
 		return err
 	}
@@ -1022,7 +960,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
 	id, err := f.dirCache.FindDir(ctx, remote, false)
 	var opts rest.Opts
 	if err == nil {
```
@@ -1061,6 +999,66 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||
return info.SharedLink.URL, err
|
||||
}
|
||||
|
||||
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",
NoResponse: true,
}
if itemType == api.ItemTypeFile {
opts.Path = "/files/" + id + "/trash"
} else {
opts.Path = "/folders/" + id + "/trash"
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))

var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
return
}

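With CleanUp implemented (and the fs.CleanUpper assertion added further down), the box backend becomes reachable through rclone's generic cleanup command, so emptying the trash should be as simple as (remote name illustrative):

rclone cleanup box:

Since deletePermanently removes items from the trash for good, this is irreversible; the loop pages through /folders/trash/items with limit/offset until TotalCount is reached.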
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1268,7 +1266,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.Remote()

// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
if err != nil {
return err
}
@@ -1303,6 +1301,7 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
)

@@ -182,15 +183,13 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
defer atexit.OnError(&err, func() {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}()
})()

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering

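The rewritten defer swaps the hand-rolled "cancel if err != nil" closure for lib/atexit's OnError helper, which should also fire the cleanup when rclone is interrupted by a signal mid-upload, not just on an error return. A minimal sketch of the pattern, assuming the same OnError signature used in the hunk above:

package example

import (
	"errors"

	"github.com/rclone/rclone/lib/atexit"
)

// doWork sketches the cancel-on-error pattern: the closure runs when
// doWork returns a non-nil error, and also if the process exits via a
// signal while the work is in flight. OnError returns a function, so
// the defer line ends with a trailing () to invoke it on return.
func doWork() (err error) {
	defer atexit.OnError(&err, func() {
		// abort whatever was started here
	})()
	return errors.New("something failed") // illustrative failure
}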
backend/cache/cache.go (vendored): 14 changes
@@ -1829,6 +1829,19 @@ func (f *Fs) isRootInPath(p string) bool {
return strings.HasPrefix(p, f.Root()+"/")
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
for _, dir := range dirs {
_ = f.cache.RemoveDir(dir.Remote())
}
return do(ctx, dirs)
}

// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
_ = f.cache.RemoveDir("")
@@ -1926,4 +1939,5 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
)

backend/cache/cache_internal_test.go (vendored): 3 changes
@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -299,6 +300,7 @@ func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
}

func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -342,6 +344,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
}

func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)

backend/cache/cache_test.go (vendored): 2 changes
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

@@ -72,6 +72,20 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.

This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
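A sketch of the workflow described in the server_side_across_configs help above, with illustrative remote and directory names. The two crypt remotes wrap the same underlying backend and differ only in filename_encryption; because a server side move does not re-encrypt file data, both must be configured with identical password (and password2) values, omitted here for brevity:

[names-standard]
type = crypt
remote = mybackend:encrypted-old
filename_encryption = standard
server_side_across_configs = true

[names-obfuscate]
type = crypt
remote = mybackend:encrypted-new
filename_encryption = obfuscate
server_side_across_configs = true

rclone move names-standard: names-obfuscate: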
@@ -181,6 +195,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)

return f, err
@@ -193,6 +208,7 @@ type Options struct {
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
}

@@ -656,7 +672,7 @@ func (f *Fs) DirCacheFlush() {
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
@@ -664,9 +680,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
}
return do(ctx, o.(*Object).Object.Remote())
return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
}

// ChangeNotify calls the passed function with a path

@@ -18,12 +18,12 @@ import (
"mime"
"net/http"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/template"
"time"

@@ -40,6 +40,7 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -56,6 +57,7 @@ const (
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
shortcutMimeType = "application/vnd.google-apps.shortcut"
shortcutMimeTypeDangling = "application/vnd.google-apps.shortcut.dangling" // synthetic mime type for internal use
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
@@ -68,6 +70,8 @@ const (
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
)

// Globals
@@ -226,7 +230,7 @@ in with the ID of the root folder.
`,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
@@ -346,9 +350,12 @@ date is used.`,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: "Impersonate this user when using a service account.",
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.

Note that if this is used then "root_folder_id" will be ignored.
`,
Advanced: true,
}, {
Name: "alternate_export",
@@ -558,6 +565,9 @@ type Fs struct {
isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper
grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
}

type baseObject struct {
@@ -999,7 +1009,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli

// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
@@ -1079,11 +1089,14 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}

f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: newPacer(opt),
m: m,
name: name,
root: root,
opt: *opt,
pacer: newPacer(opt),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
@@ -1109,9 +1122,20 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
}

// If impersonating warn about root_folder_id if set and unset it
//
// This is because rclone v1.51 and v1.52 cached root_folder_id when
// using impersonate which they shouldn't have done. It is possible
// someone is using impersonate and root_folder_id in which case this
// breaks their workflow. There isn't an easy way around that.
if opt.RootFolderID != "" && opt.RootFolderID != "appDataFolder" && opt.Impersonate != "" {
fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
opt.RootFolderID = ""
}

// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
// override root folder if set or cached in the config and not impersonating
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
@@ -1128,7 +1152,10 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
// Don't cache the root folder ID if impersonating
if opt.Impersonate == "" {
m.Set("root_folder_id", rootID)
}
}

f.dirCache = dircache.New(root, f.rootFolderID, f)
@@ -1330,6 +1357,10 @@ func (f *Fs) newObjectWithExportInfo(
// and not from a listing. This is unlikely.
fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
return nil, fs.ErrorObjectNotFound
case info.MimeType == shortcutMimeTypeDangling:
// Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
@@ -1402,6 +1433,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
leaf = f.opt.Enc.FromStandardName(leaf)
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
pathID = actualID(pathID)
createInfo := &drive.File{
Name: leaf,
Description: leaf,
@@ -1562,10 +1594,6 @@ func (f *Fs) findImportFormat(mimeType string) string {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -1634,15 +1662,17 @@ func (s listRSlices) Less(i, j int) bool {
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
var dirs []string
var paths []string
var grouping int32

for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
grouping = atomic.LoadInt32(&f.grouping)
waitloop:
for i := 1; i < grouping; i++ {
for i := int32(1); i < grouping; i++ {
select {
case d, ok := <-in:
if !ok {
@@ -1655,6 +1685,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
}
listRSlices{dirs, paths}.Sort()
var iErr error
foundItems := false
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
@@ -1662,6 +1693,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
}
for _, parent := range item.Parents {
var i int
foundItems = true
earlyExit := false
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
@@ -1702,6 +1734,53 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
}
return false
})
// Found no items in more than one directory. Retry these as
// individual directories. This is to work around a bug in google
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
var recycled = make([]listREntry, len(dirs))
f.listRmu.Lock()
for i := range dirs {
recycled[i] = listREntry{id: dirs[i], path: paths[i]}
// Make a note of these dirs - if they all turn
// out to be empty then we can re-enable grouping
f.listRempties[dirs[i]] = struct{}{}
}
f.listRmu.Unlock()
// recycle these in the background so we don't deadlock
// the listR runners if they all get here
wg.Add(len(recycled))
go func() {
for _, entry := range recycled {
in <- entry
}
fs.Debugf(f, "Recycled %d entries", len(recycled))
}()
}
// If using a grouping of 1 and dir was empty then check to see if it
// is part of the group that caused grouping to be disabled.
if grouping == 1 && len(dirs) == 1 && !foundItems {
f.listRmu.Lock()
if _, found := f.listRempties[dirs[0]]; found {
// Remove the ID
delete(f.listRempties, dirs[0])
// If no empties left => all the directories that
// triggered the grouping being set to 1 were actually
// empty so must have made a mistake
if len(f.listRempties) == 0 {
if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
fs.Logf(f, "Re-enabling ListR as previous detection was in error")
}
}
}
f.listRmu.Unlock()
}

for range dirs {
wg.Done()
}
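The workaround above amounts to an adaptive batch size guarded by atomics: collapse to single-directory queries when a grouped query suspiciously returns nothing, then restore the default once every implicated directory proves genuinely empty. A condensed sketch of just that toggle (the grouping field and listRGrouping constant follow the diff; everything else is illustrative):

package example

import "sync/atomic"

const listRGrouping = 50 // default number of directory IDs queried at once

type lister struct {
	grouping int32 // read and written atomically
}

// suspectBug drops to single-directory listing after a grouped query
// returned nothing; SwapInt32 means only the transition is reported.
func (l *lister) suspectBug() (transitioned bool) {
	return atomic.SwapInt32(&l.grouping, 1) != 1
}

// falseAlarm restores grouping once every suspect directory turned
// out to be genuinely empty, so the earlier detection was wrong.
func (l *lister) falseAlarm() (transitioned bool) {
	return atomic.SwapInt32(&l.grouping, listRGrouping) != listRGrouping
}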
@@ -1736,15 +1815,6 @@
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
const (
grouping = 50
inputBuffer = 1000
)

err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -1753,7 +1823,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
in := make(chan listREntry, inputBuffer)
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
@@ -1766,6 +1836,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
job := listREntry{actualID(d.ID()), d.Remote()}
select {
case in <- job:
// Adding the wg after we've entered the item is
// safe here because we know when the callback
// is called we are holding a waitgroup.
wg.Add(1)
default:
overflow = append(overflow, job)
@@ -1779,7 +1852,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
in <- listREntry{directoryID, dir}

for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, grouping)
go f.listRRunner(ctx, &wg, in, out, cb)
}
go func() {
// wait until all the directories are processed
@@ -1789,8 +1862,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Lock()
l := len(overflow)
// only fill half of the channel to prevent entries being put into overflow again
if l > inputBuffer/2 {
l = inputBuffer / 2
if l > listRInputBuffer/2 {
l = listRInputBuffer / 2
}
wg.Add(l)
for _, d := range overflow[:l] {
@@ -1912,11 +1985,18 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
}
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling
return item, nil
}
return nil, errors.Wrap(err, "failed to resolve shortcut")
}
// make sure we use the Name and Parents from the original item
// make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name
newItem.Parents = item.Parents
newItem.Trashed = item.Trashed
// the new ID is a composite ID
newItem.Id = joinID(newItem.Id, item.Id)
return newItem, nil
@@ -1949,7 +2029,7 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry,
//
// Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -2112,13 +2192,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}

@@ -2291,11 +2365,11 @@ func (f *Fs) Purge(ctx context.Context) error {
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
rootID, err := f.dirCache.RootID(ctx, false)
if err != nil {
return err
}
err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
err = f.delete(ctx, shortcutID(rootID), f.opt.UseTrash)
f.dirCache.ResetRoot()
if err != nil {
return err
@@ -2430,7 +2504,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -2479,77 +2553,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)

// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}

// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
_ = srcLeaf

// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}

// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
dstDirectoryID = actualID(dstDirectoryID)

// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}

// Find ID of src parent
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
}
if err != nil {
return err
}
srcDirectoryID = actualID(srcDirectoryID)

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
patch := drive.File{
Name: leaf,
Name: dstLeaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
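Like the box DirMove hunk earlier, the boilerplate (root checks, destination-exists check, parent lookup) now lives in a single dircache helper. A sketch of its contract, using the DirMove signature exactly as it appears in both hunks; everything else here is illustrative:

package example

import (
	"context"

	"github.com/rclone/rclone/lib/dircache"
)

// moveDir shows the shared preamble both backends now delegate: DirMove
// validates the request, resolves the source directory ID and the
// destination parent, and hands back what the backend's rename call needs.
func moveDir(ctx context.Context, dstDC, srcDC *dircache.DirCache, srcRoot, srcRemote, dstRoot, dstRemote string) (srcID, dstParentID, dstLeaf string, err error) {
	// DirMove also returns the source parent ID and leaf, which some
	// backends need; they are discarded here.
	srcID, _, _, dstParentID, dstLeaf, err = dstDC.DirMove(ctx, srcDC, srcRoot, srcRemote, dstRoot, dstRemote)
	return srcID, dstParentID, dstLeaf, err
}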
@@ -2801,11 +2817,10 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
isDir := false
if srcPath == "" {
// source is root directory
err = f.dirCache.FindRoot(ctx, false)
srcID, err = f.dirCache.RootID(ctx, false)
if err != nil {
return nil, err
}
srcID = f.dirCache.RootID()
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile {
@@ -3034,7 +3049,7 @@ func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, "", "", "", false, fs.ErrorObjectNotFound
@@ -3219,6 +3234,9 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.mimeType == shortcutMimeTypeDangling {
return nil, errors.New("can't read dangling shortcut")
}
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {

@@ -274,7 +274,7 @@ func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = "file name.txt"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()

@@ -782,11 +782,17 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {

@@ -148,11 +148,6 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
}

func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}

directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err

@@ -264,7 +264,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -334,7 +334,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -389,13 +389,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}

@@ -403,11 +397,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}

directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err

@@ -50,8 +50,19 @@ func init() {
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Name: "tls",
Help: `Use FTPS over TLS (Implicit)
When using implicit FTP over TLS the client will connect using TLS
right from the start, which in turn breaks the compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use FTP over TLS (Explicit)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
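The two options are mutually exclusive, which ftpConnection below enforces. A sketch of the two resulting remote definitions (host name illustrative):

[ftp-implicit]
type = ftp
host = ftp.example.com
port = 990
tls = true

[ftp-explicit]
type = ftp
host = ftp.example.com
explicit_tls = true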
@@ -90,6 +101,7 @@ type Options struct {
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
@@ -152,12 +164,21 @@ func (f *Fs) Features() *fs.Features {
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS {
if f.opt.TLS && f.opt.ExplicitTLS {
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
} else if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))

@@ -21,7 +21,6 @@ import (
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
@@ -38,6 +37,7 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -79,7 +79,8 @@ func init() {
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
if saFile != "" || saCreds != "" {
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
@@ -98,11 +99,15 @@ func init() {
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
|
||||
ProjectNumber string `config:"project_number"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
Anonymous bool `config:"anonymous"`
|
||||
ObjectACL string `config:"object_acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||
@@ -405,13 +411,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
if opt.Anonymous {
|
||||
oAuthClient = &http.Client{}
|
||||
} else if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"one": {a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -39,7 +39,7 @@ func TestAlbumsAdd(t *testing.T) {
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"": {"one"},
}, albums.path)

a2 := &api.Album{
@@ -49,8 +49,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
"one": {a1},
"two": {a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -61,7 +61,7 @@ func TestAlbumsAdd(t *testing.T) {
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
"": {"one", "two"},
}, albums.path)

// Add a duplicate
@@ -72,8 +72,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2a)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one": {a1},
"two": {a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -86,7 +86,7 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"": {"one", "two {2}", "two {2a}"},
}, albums.path)

// Add a sub directory
@@ -97,9 +97,9 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -114,8 +114,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)

// Add a weird path
@@ -126,10 +126,10 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a0)

assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"{0}": {a0},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
@@ -146,8 +146,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"},
}, albums.path)
}

@@ -181,9 +181,9 @@ func TestAlbumsDel(t *testing.T) {
albums.add(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -198,16 +198,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)

albums.del(a1)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
@@ -220,16 +220,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)

albums.del(a2)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
@@ -240,16 +240,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2a}"},
"one": {"sub"},
}, albums.path)

albums.del(a2a)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
@@ -258,16 +258,16 @@ func TestAlbumsDel(t *testing.T) {
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
"": {"one"},
"one": {"sub"},
}, albums.path)

albums.del(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)

@@ -11,6 +11,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -26,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -39,26 +41,33 @@ import (
|
||||
|
||||
// Globals
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Archive"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
configDevice = "device"
|
||||
configMountpoint = "mountpoint"
|
||||
configTokenURL = "tokenURL"
|
||||
configVersion = 1
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Archive"
|
||||
rootURL = "https://jfs.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
configDevice = "device"
|
||||
configMountpoint = "mountpoint"
|
||||
configTokenURL = "tokenURL"
|
||||
configClientID = "client_id"
|
||||
configClientSecret = "client_secret"
|
||||
configVersion = 1
|
||||
|
||||
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
|
||||
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
v1configVersion = 0
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app for a personal account
|
||||
oauthConfig = &oauth2.Config{
|
||||
ClientID: "jottacli",
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: defaultTokenURL,
|
||||
TokenURL: defaultTokenURL,
|
||||
@@ -83,7 +92,7 @@ func init() {
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse config version - corrupted config")
|
||||
}
|
||||
refresh = ver != configVersion
|
||||
refresh = (ver != configVersion) && (ver != v1configVersion)
|
||||
}
|
||||
|
||||
if refresh {
|
||||
@@ -98,42 +107,12 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
clientConfig := *fs.Config
|
||||
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
|
||||
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
|
||||
|
||||
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
|
||||
fmt.Printf("Login Token> ")
|
||||
loginToken := config.ReadLine()
|
||||
|
||||
token, err := doAuth(ctx, srv, loginToken, m)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get oauth token: %s", err)
|
||||
}
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while saving token: %s", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
}
|
||||
|
||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
v1config(ctx, name, m)
|
||||
} else {
|
||||
v2config(ctx, name, m)
|
||||
}
|
||||
|
||||
m.Set("configVersion", strconv.Itoa(configVersion))
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "md5_memory_limit",
|
||||
@@ -150,11 +129,6 @@ func init() {
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unlink",
|
||||
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_resume_limit",
|
||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||
@@ -181,7 +155,6 @@ type Options struct {
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Unlink bool `config:"unlink"`
|
||||
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
@@ -257,8 +230,181 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// doAuth runs the actual token request
|
||||
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
|
||||
// v1config configure a jottacloud backend using legacy authentification
|
||||
func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
srv := rest.NewClient(fshttp.NewClient(fs.Config))
|
||||
|
||||
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
|
||||
if config.Confirm(false) {
|
||||
deviceRegistration, err := registerDevice(ctx, srv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to register device: %v", err)
|
||||
}
|
||||
|
||||
m.Set(configClientID, deviceRegistration.ClientID)
|
||||
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||
}
|
||||
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID = v1ClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret = v1EncryptedClientSecret
|
||||
}
|
||||
oauthConfig.ClientID = clientID
|
||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||
|
||||
oauthConfig.Endpoint.AuthURL = v1tokenURL
|
||||
oauthConfig.Endpoint.TokenURL = v1tokenURL
|
||||
|
||||
fmt.Printf("Username> ")
|
||||
username := config.ReadLine()
|
||||
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
|
||||
|
||||
token, err := doAuthV1(ctx, srv, username, password)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get oauth token: %s", err)
|
||||
}
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while saving token: %s", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
}
|
||||
|
||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
}
|
||||
|
||||
m.Set("configVersion", strconv.Itoa(v1configVersion))
|
||||
}
|
||||
|
||||
// registerDevice registers a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
	// random generator to generate random device names
	seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
	randomDeviceNamePartLength := 21
	randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
	charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	for i := range randomDeviceNamePart {
		randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
	}
	randomDeviceName := "rclone-" + string(randomDeviceNamePart)
	fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

	values := url.Values{}
	values.Set("device_id", randomDeviceName)

	opts := rest.Opts{
		Method:       "POST",
		RootURL:      v1registerURL,
		ContentType:  "application/x-www-form-urlencoded",
		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
		Parameters:   values,
	}

	var deviceRegistration *api.DeviceRegistrationResponse
	_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
	return deviceRegistration, err
}

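// For illustration, the generated device_id is "rclone-" followed by 21
// characters drawn from the charset above, e.g. "rclone-x7KpQ2mRz9TbW4nLc1vAj"
// (example value only - the real name is random).
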
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
	// prepare our token request with username and password
	values := url.Values{}
	values.Set("grant_type", "PASSWORD")
	values.Set("password", password)
	values.Set("username", username)
	values.Set("client_id", oauthConfig.ClientID)
	values.Set("client_secret", oauthConfig.ClientSecret)
	opts := rest.Opts{
		Method:      "POST",
		RootURL:     oauthConfig.Endpoint.AuthURL,
		ContentType: "application/x-www-form-urlencoded",
		Parameters:  values,
	}

	// do the first request
	var jsonToken api.TokenJSON
	resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
	if err != nil {
		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
		if resp != nil {
			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
				fmt.Printf("This account uses 2 factor authentication. You will receive a verification code via SMS.\n")
				fmt.Printf("Enter verification code> ")
				authCode := config.ReadLine()

				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
				opts.ExtraHeaders = make(map[string]string)
				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
				resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
			}
		}
	}

	token.AccessToken = jsonToken.AccessToken
	token.RefreshToken = jsonToken.RefreshToken
	token.TokenType = jsonToken.TokenType
	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
	return token, err
}

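// For reference, a minimal sketch of what the request above puts on the wire
// (values illustrative, not from a real session):
//
//	POST <v1tokenURL>
//	Content-Type: application/x-www-form-urlencoded
//
//	grant_type=PASSWORD&username=user%40example.com&password=...&client_id=...&client_secret=...
//
// When 2fa is enabled the retry repeats the same form with the extra
// "X-Jottacloud-Otp" header carrying the SMS code.
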
// v2config configures a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
	srv := rest.NewClient(fshttp.NewClient(fs.Config))

	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
	fmt.Printf("Login Token> ")
	loginToken := config.ReadLine()

	token, err := doAuthV2(ctx, srv, loginToken, m)
	if err != nil {
		log.Fatalf("Failed to get oauth token: %s", err)
	}
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
		log.Fatalf("Error while saving token: %s", err)
	}

	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
	if config.Confirm(false) {
		oauthConfig.ClientID = "jottacli"
		oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
		if err != nil {
			log.Fatalf("Failed to load oAuthClient: %s", err)
		}

		srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
		if err != nil {
			log.Fatalf("Failed to setup mountpoint: %s", err)
		}
		m.Set(configDevice, device)
		m.Set(configMountpoint, mountpoint)
	}

	m.Set("configVersion", strconv.Itoa(configVersion))
}

// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
		return token, err
@@ -469,6 +615,29 @@ func (f *Fs) filePath(file string) string {
	return urlPathEscape(f.filePathRaw(file))
}

// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
	if v1tokenURL == req.URL.String() {
		// read the entire body
		refreshBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return
		}
		_ = req.Body.Close()

		// make the refresh token upper case
		refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

		// set the new ReadCloser (with a dummy Close())
		req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
	}
}

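// Usage sketch - this mirrors the wiring done in NewFs below: the filter must
// be attached to the base http client before the oauth2 transport is layered
// on top, so refresh requests pass through it on the way out.
//
//	baseClient := fshttp.NewClient(fs.Config)
//	if do, ok := baseClient.Transport.(interface {
//		SetRequestFilter(f func(req *http.Request))
//	}); ok {
//		do.SetRequestFilter(grantTypeFilter)
//	}
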
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.TODO()
@@ -480,28 +649,55 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}

	// Check config version
-	var ok bool
-	var version string
-	if version, ok = m.Get("configVersion"); ok {
-		ver, err := strconv.Atoi(version)
+	var ver int
+	version, ok := m.Get("configVersion")
+	if ok {
+		ver, err = strconv.Atoi(version)
		if err != nil {
			return nil, errors.New("Failed to parse config version")
		}
-		ok = ver == configVersion
+		ok = (ver == configVersion) || (ver == v1configVersion)
	}
	if !ok {
		return nil, errors.New("Outdated config - please reconfigure this backend")
	}

-	// if custom endpoints are set use them else stick with defaults
-	if tokenURL, ok := m.Get(configTokenURL); ok {
-		oauthConfig.Endpoint.TokenURL = tokenURL
-		// jottacloud is weird. we need to use the tokenURL as authURL
-		oauthConfig.Endpoint.AuthURL = tokenURL
+	baseClient := fshttp.NewClient(fs.Config)
+
+	if ver == configVersion {
+		oauthConfig.ClientID = "jottacli"
+		// if custom endpoints are set use them else stick with defaults
+		if tokenURL, ok := m.Get(configTokenURL); ok {
+			oauthConfig.Endpoint.TokenURL = tokenURL
+			// jottacloud is weird. we need to use the tokenURL as authURL
+			oauthConfig.Endpoint.AuthURL = tokenURL
+		}
+	} else if ver == v1configVersion {
+		clientID, ok := m.Get(configClientID)
+		if !ok {
+			clientID = v1ClientID
+		}
+		clientSecret, ok := m.Get(configClientSecret)
+		if !ok {
+			clientSecret = v1EncryptedClientSecret
+		}
+		oauthConfig.ClientID = clientID
+		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
+
+		oauthConfig.Endpoint.TokenURL = v1tokenURL
+		oauthConfig.Endpoint.AuthURL = v1tokenURL
+
+		// add the request filter to fix token refresh
+		if do, ok := baseClient.Transport.(interface {
+			SetRequestFilter(f func(req *http.Request))
+		}); ok {
+			do.SetRequestFilter(grantTypeFilter)
+		} else {
+			fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
+		}
	}

	// Create OAuth Client
-	baseClient := fshttp.NewClient(fs.Config)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
@@ -891,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		retry, _ := shouldRetry(resp, err)
+		return (retry && resp.StatusCode != 500), err
	})
	if err != nil {
		return nil, err
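// Design note: 500s are excluded from the retry above because the server can
// answer mvDir with a 500 even when the move actually succeeded - see the
// DirMove workaround below.
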
@@ -995,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

+	// surprise! jottacloud fucked up dirmove - the api spits out an error but
+	// dir gets moved regardless
+	if apiErr, ok := err.(*api.Error); ok {
+		if apiErr.StatusCode == 500 {
+			_, err := f.NewObject(ctx, dstRemote)
+			if err == fs.ErrorNotAFile {
+				log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
+				return nil
+			}
+			return err
+		}
+	}
	if err != nil {
		return errors.Wrap(err, "couldn't move directory")
	}
@@ -1002,14 +1211,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       f.filePath(remote),
		Parameters: url.Values{},
	}

-	if f.opt.Unlink {
+	if unlink {
		opts.Parameters.Set("mode", "disableShare")
	} else {
		opts.Parameters.Set("mode", "enableShare")
@@ -1029,12 +1238,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
		}
	}
	if err != nil {
-		if f.opt.Unlink {
+		if unlink {
			return "", errors.Wrap(err, "couldn't remove public link")
		}
		return "", errors.Wrap(err, "couldn't create public link")
	}
-	if f.opt.Unlink {
+	if unlink {
		if result.PublicSharePath != "" {
			return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
		}

@@ -603,7 +603,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
}

// PublicLink creates a public link to the remote path
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
	if err != nil {
		return "", translateErrorsDir(err)

@@ -10,7 +10,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -90,7 +89,24 @@ are being uploaded and aborts with a message which starts "can't copy
|
||||
|
||||
However on some file systems this modification time check may fail (eg
|
||||
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
|
||||
check can be disabled with this flag.`,
|
||||
check can be disabled with this flag.
|
||||
|
||||
If this flag is set, rclone will use its best efforts to transfer a
|
||||
file which is being updated. If the file is only having things
|
||||
appended to it (eg a log) then rclone will transfer the log file with
|
||||
the size it had the first time rclone saw it.
|
||||
|
||||
If the file is being modified throughout (not just appended to) then
|
||||
the transfer may fail with a hash check failure.
|
||||
|
||||
In detail, once the file has had stat() called on it for the first
|
||||
time we:
|
||||
|
||||
- Only transfer the size that stat gave
|
||||
- Only checksum the size that stat gave
|
||||
- Don't update the stat info for the file
|
||||
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
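// Usage sketch (flag name assumed from the option name above, following
// rclone's usual backend flag naming): transferring a live, append-only log
// directory without tripping the "source file is being updated" check:
//
//	rclone copy --local-no-check-updated /var/log/app/ remote:logs/
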
@@ -166,20 +182,22 @@ type Fs struct {
|
||||
warned map[string]struct{} // whether we have warned about this string
|
||||
|
||||
// do os.Lstat or os.Stat
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
objectHashesMu sync.Mutex // global lock for Object.hashes
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
objectMetaMu sync.RWMutex // global lock for Object metadata
|
||||
}
|
||||
|
||||
// Object represents a local filesystem object
|
||||
type Object struct {
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path (encoded path)
|
||||
path string // The local path (OS path)
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
translatedLink bool // Is this object a translated link
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path (encoded path)
|
||||
path string // The local path (OS path)
|
||||
// When using these items the fs.objectMetaMu must be held
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
// these are read only and don't need the mutex held
|
||||
translatedLink bool // Is this object a translated link
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
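// Locking sketch for the scheme above: all Object metadata now shares the
// single fs-wide objectMetaMu - readers take the read lock, the stat/update
// paths take the write lock, e.g. (mirroring the accessors further down):
//
//	o.fs.objectMetaMu.RLock()
//	size, modTime := o.size, o.modTime
//	o.fs.objectMetaMu.RUnlock()
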
@@ -214,6 +232,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
IsLocal: true,
|
||||
SlowHash: true,
|
||||
}).Fill(f)
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
@@ -615,6 +634,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := f.newObject(remote)
|
||||
dstObj.fs.objectMetaMu.RLock()
|
||||
dstObjMode := dstObj.mode
|
||||
dstObj.fs.objectMetaMu.RUnlock()
|
||||
|
||||
// Check it is a file if it exists
|
||||
err := dstObj.lstat()
|
||||
@@ -622,7 +644,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if !dstObj.fs.isRegular(dstObj.mode) {
|
||||
} else if !dstObj.fs.isRegular(dstObjMode) {
|
||||
// It isn't a file
|
||||
return nil, errors.New("can't move file onto non-file")
|
||||
}
|
||||
@@ -776,8 +798,10 @@ func (o *Object) Remote() string {
|
||||
// Hash returns the requested hash of a file as a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
// Check that the underlying file hasn't changed
|
||||
o.fs.objectMetaMu.RLock()
|
||||
oldtime := o.modTime
|
||||
oldsize := o.size
|
||||
o.fs.objectMetaMu.RUnlock()
|
||||
err := o.lstat()
|
||||
var changed bool
|
||||
if err != nil {
|
||||
@@ -789,15 +813,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", errors.Wrap(err, "hash: failed to stat")
|
||||
}
|
||||
} else {
|
||||
o.fs.objectMetaMu.RLock()
|
||||
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
|
||||
o.fs.objectMetaMu.RUnlock()
|
||||
}
|
||||
|
||||
o.fs.objectHashesMu.Lock()
|
||||
hashes := o.hashes
|
||||
o.fs.objectMetaMu.RLock()
|
||||
hashValue, hashFound := o.hashes[r]
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
o.fs.objectMetaMu.RUnlock()
|
||||
|
||||
if changed || hashes == nil || !hashFound {
|
||||
if changed || !hashFound {
|
||||
var in io.ReadCloser
|
||||
|
||||
if !o.translatedLink {
|
||||
@@ -809,9 +834,14 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
} else {
|
||||
in, err = o.openTranslatedLink(0, -1)
|
||||
}
|
||||
// If not checking for updates, only read size given
|
||||
if o.fs.opt.NoCheckUpdated {
|
||||
in = readers.NewLimitedReadCloser(in, o.size)
|
||||
}
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to open")
|
||||
}
|
||||
var hashes map[hash.Type]string
|
||||
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
|
||||
closeErr := in.Close()
|
||||
if err != nil {
|
||||
@@ -821,24 +851,28 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", errors.Wrap(closeErr, "hash: failed to close")
|
||||
}
|
||||
hashValue = hashes[r]
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.fs.objectMetaMu.Lock()
|
||||
if o.hashes == nil {
|
||||
o.hashes = hashes
|
||||
} else {
|
||||
o.hashes[r] = hashValue
|
||||
}
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
}
|
||||
return hashValue, nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
o.fs.objectMetaMu.RLock()
|
||||
defer o.fs.objectMetaMu.RUnlock()
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
o.fs.objectMetaMu.RLock()
|
||||
defer o.fs.objectMetaMu.RUnlock()
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
@@ -859,7 +893,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
|
||||
// Storable returns a boolean showing if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
o.fs.objectMetaMu.RLock()
|
||||
mode := o.mode
|
||||
o.fs.objectMetaMu.RUnlock()
|
||||
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
|
||||
if !o.fs.opt.SkipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
@@ -892,11 +928,15 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "can't read status of source file while transferring")
|
||||
}
|
||||
if file.o.size != fi.Size() {
|
||||
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
|
||||
file.o.fs.objectMetaMu.RLock()
|
||||
oldtime := file.o.modTime
|
||||
oldsize := file.o.size
|
||||
file.o.fs.objectMetaMu.RUnlock()
|
||||
if oldsize != fi.Size() {
|
||||
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
|
||||
}
|
||||
if !file.o.modTime.Equal(fi.ModTime()) {
|
||||
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
|
||||
if !oldtime.Equal(fi.ModTime()) {
|
||||
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -913,9 +953,9 @@ func (file *localOpenFile) Close() (err error) {
|
||||
err = file.in.Close()
|
||||
if err == nil {
|
||||
if file.hash.Size() == file.o.Size() {
|
||||
file.o.fs.objectHashesMu.Lock()
|
||||
file.o.fs.objectMetaMu.Lock()
|
||||
file.o.hashes = file.hash.Sums()
|
||||
file.o.fs.objectHashesMu.Unlock()
|
||||
file.o.fs.objectMetaMu.Unlock()
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -940,7 +980,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.size)
|
||||
offset, limit = x.Decode(o.Size())
|
||||
case *fs.HashesOption:
|
||||
if x.Hashes.Count() > 0 {
|
||||
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
|
||||
@@ -955,6 +995,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
}
|
||||
|
||||
// If not checking updated then limit to current size. This means if
|
||||
// file is being extended, readers will read a o.Size() bytes rather
|
||||
// than the new size making for a consistent upload.
|
||||
if limit < 0 && o.fs.opt.NoCheckUpdated {
|
||||
limit = o.size
|
||||
}
|
||||
|
||||
// Handle a translated link
|
||||
if o.translatedLink {
|
||||
return o.openTranslatedLink(offset, limit)
|
||||
@@ -1091,9 +1138,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// All successful so update the hashes
|
||||
if hasher != nil {
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.fs.objectMetaMu.Lock()
|
||||
o.hashes = hasher.Sums()
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
}
|
||||
|
||||
// Set the mtime
|
||||
@@ -1151,17 +1198,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
||||
|
||||
// setMetadata sets the file info from the os.FileInfo passed in
|
||||
func (o *Object) setMetadata(info os.FileInfo) {
|
||||
// Don't overwrite the info if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != info.Size() {
|
||||
o.size = info.Size()
|
||||
}
|
||||
if !o.modTime.Equal(info.ModTime()) {
|
||||
o.modTime = info.ModTime()
|
||||
}
|
||||
if o.mode != info.Mode() {
|
||||
o.mode = info.Mode()
|
||||
// if not checking updated then don't update the stat
|
||||
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
|
||||
return
|
||||
}
|
||||
o.fs.objectMetaMu.Lock()
|
||||
o.size = info.Size()
|
||||
o.modTime = info.ModTime()
|
||||
o.mode = info.Mode()
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
}
|
||||
|
||||
// Stat an Object into info
|
||||
@@ -1193,7 +1238,7 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
|
||||
if !noUNC {
|
||||
// Convert to UNC
|
||||
s = uncPath(s)
|
||||
s = file.UNCPath(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
@@ -1207,28 +1252,6 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
return s
|
||||
}
|
||||
|
||||
-// Pattern to match a windows absolute path: "c:\" and similar
-var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
-
-// uncPath converts an absolute Windows path
-// to a UNC long path.
-func uncPath(l string) string {
-	// If prefix is "\\", we already have a UNC path or server.
-	if strings.HasPrefix(l, `\\`) {
-		// If already long path, just keep it
-		if strings.HasPrefix(l, `\\?\`) {
-			return l
-		}
-
-		// Trim "\\" from path and add UNC prefix.
-		return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
-	}
-	if isAbsWinDrive.MatchString(l) {
-		return `\\?\` + l
-	}
-	return l
-}

// Check the interfaces are satisfied
var (
	_ fs.Fs = &Fs{}

@@ -5,49 +5,6 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var uncTestPaths = []string{
|
||||
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
}

var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}

// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
	for i, p := range uncTestPaths {
		unc := uncPath(p)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
		// Test we don't add more.
		unc = uncPath(unc)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
	}
}

// Test Windows character replacements
var testsWindows = [][2]string{
	{`c:\temp`, `c:\temp`},

@@ -1450,7 +1450,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
||||
// fs.Debugf(f, ">>> PublicLink %q", remote)
|
||||
|
||||
token, err := f.accessToken()
|
||||
|
||||
@@ -836,7 +836,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
||||
root, err := f.findRoot(false)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "PublicLink failed to find root node")
|
||||
|
||||
@@ -498,13 +498,13 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
||||
var dirCacheFoundRoot bool
|
||||
var rootNormalizedID string
|
||||
if f.dirCache != nil {
|
||||
var dirCacheRootIDExists bool
|
||||
rootNormalizedID, dirCacheRootIDExists = f.dirCache.Get("")
|
||||
rootNormalizedID, err = f.dirCache.RootID(ctx, false)
|
||||
dirCacheRootIDExists := err == nil
|
||||
if f.root == "" {
|
||||
// if f.root == "", it means f.root is the absolute root of the drive
|
||||
// and its ID should have been found in NewFs
|
||||
dirCacheFoundRoot = dirCacheRootIDExists
|
||||
} else if _, err := f.dirCache.RootParentID(); err == nil {
|
||||
} else if _, err := f.dirCache.RootParentID(ctx, false); err == nil {
|
||||
// if root is in a folder, it must have a parent folder, and
|
||||
// if dirCache has found root in NewFs, the parent folder's ID
|
||||
// should be present.
|
||||
@@ -813,10 +813,6 @@ OUTER:
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -864,7 +860,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, leaf, directoryID, err
|
||||
}
|
||||
@@ -895,13 +891,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
}
|
||||
_, err := f.dirCache.FindDir(ctx, dir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -924,10 +914,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
dc := f.dirCache
|
||||
err := dc.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootID, err := dc.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1121,9 +1107,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
id, dstDriveID, _ := parseNormalizedID(directoryID)
|
||||
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
|
||||
|
||||
if dstDriveID != srcObjDriveID {
|
||||
if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
// "Items cannot be moved between Drives using this request."
|
||||
fs.Debugf(f, "Can't move files between drives (%q != %q)", dstDriveID, srcObjDriveID)
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
@@ -1173,70 +1160,22 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Refuse to move to or from the root
|
||||
if srcPath == "" || dstPath == "" {
|
||||
fs.Debugf(src, "DirMove error: Can't move root")
|
||||
return errors.New("can't move root directory")
|
||||
}
|
||||
|
||||
// find the root src directory
|
||||
err := srcFs.dirCache.FindRoot(ctx, false)
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the root dst directory
|
||||
if dstRemote != "" {
|
||||
err = f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if f.dirCache.FoundRoot() {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of dst parent, creating subdirs if necessary
|
||||
var leaf, dstDirectoryID string
|
||||
findPath := dstRemote
|
||||
if dstRemote == "" {
|
||||
findPath = f.root
|
||||
}
|
||||
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, srcDriveID, _ := parseNormalizedID(srcID)
|
||||
|
||||
if dstDriveID != srcDriveID {
|
||||
if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
// "Items cannot be moved between Drives using this request."
|
||||
fs.Debugf(f, "Can't move directories between drives (%q != %q)", dstDriveID, srcDriveID)
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Get timestamps of src so they can be preserved
|
||||
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
|
||||
if err != nil {
|
||||
@@ -1246,7 +1185,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
// Do the move
|
||||
opts := newOptsCall(srcID, "PATCH", "")
|
||||
move := api.MoveItemRequest{
|
||||
Name: f.opt.Enc.FromStandardName(leaf),
|
||||
Name: f.opt.Enc.FromStandardName(dstLeaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
DriveID: dstDriveID,
|
||||
ID: parsedDstDirID,
|
||||
@@ -1311,7 +1250,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
// PublicLink returns a link for downloading without account.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
||||
info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -1688,41 +1627,22 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
return nil, errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
|
||||
uploadURLChan := make(chan string, 1)
|
||||
gracefulCancel := func() {
|
||||
uploadURL, ok := <-uploadURLChan
|
||||
// Reading from uploadURLChan blocks the atexit process until
|
||||
// we are able to use uploadURL to cancel the upload
|
||||
if !ok { // createUploadSession failed - no need to cancel upload
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
cancelErr := o.cancelUploadSession(ctx, uploadURL)
|
||||
if cancelErr != nil {
|
||||
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
|
||||
}
|
||||
}
|
||||
cancelFuncHandle := atexit.Register(gracefulCancel)
|
||||
|
||||
// Create upload session
|
||||
fs.Debugf(o, "Starting multipart upload")
|
||||
session, err := o.createUploadSession(ctx, modTime)
|
||||
if err != nil {
|
||||
close(uploadURLChan)
|
||||
atexit.Unregister(cancelFuncHandle)
|
||||
return nil, err
|
||||
}
|
||||
uploadURL := session.UploadURL
|
||||
uploadURLChan <- uploadURL
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Error encountered during upload: %v", err)
|
||||
gracefulCancel()
|
||||
// Cancel the session if something went wrong
|
||||
defer atexit.OnError(&err, func() {
|
||||
fs.Debugf(o, "Cancelling multipart upload: %v", err)
|
||||
cancelErr := o.cancelUploadSession(ctx, uploadURL)
|
||||
if cancelErr != nil {
|
||||
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
|
||||
}
|
||||
atexit.Unregister(cancelFuncHandle)
|
||||
}()
|
||||
})()
|
||||
|
||||
// Upload the chunks
|
||||
remaining := size
|
||||
@@ -1869,6 +1789,17 @@ func parseNormalizedID(ID string) (string, string, string) {
|
||||
return ID, "", ""
|
||||
}
|
||||
|
||||
// Returns the canonical form of the driveID
|
||||
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
|
||||
if driveID == "" {
|
||||
canonicalDriveID = f.opt.DriveID
|
||||
} else {
|
||||
canonicalDriveID = driveID
|
||||
}
|
||||
canonicalDriveID = strings.ToLower(canonicalDriveID)
|
||||
return canonicalDriveID
|
||||
}
|
||||
|
||||
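// Usage sketch: canonicalDriveID lets the comparisons in Move and DirMove
// above treat an empty driveID and the configured default drive as the same
// drive, case insensitively, e.g.
//
//	f.canonicalDriveID("") == f.canonicalDriveID(f.opt.DriveID) // always true
//	f.canonicalDriveID("B!AbC")                                 // "b!abc"
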
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.

@@ -280,13 +280,7 @@ func errorHandler(resp *http.Response) error {
|
||||
// Mkdir creates the folder if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// fs.Debugf(nil, "Mkdir(\"%s\")", dir)
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
}
|
||||
_, err := f.dirCache.FindDir(ctx, dir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -312,10 +306,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
dc := f.dirCache
|
||||
err := dc.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootID, err := dc.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -483,58 +473,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Refuse to move to or from the root
|
||||
if srcPath == "" || dstPath == "" {
|
||||
fs.Debugf(src, "DirMove error: Can't move root")
|
||||
return errors.New("can't move root directory")
|
||||
}
|
||||
|
||||
// find the root src directory
|
||||
err = srcFs.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the root dst directory
|
||||
if dstRemote != "" {
|
||||
err = f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if f.dirCache.FoundRoot() {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of dst parent, creating subdirs if necessary
|
||||
var leaf, directoryID string
|
||||
findPath := dstRemote
|
||||
if dstRemote == "" {
|
||||
findPath = f.root
|
||||
}
|
||||
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -546,9 +486,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
moveFolderData := moveCopyFolder{
|
||||
SessionID: f.session.SessionID,
|
||||
FolderID: srcID,
|
||||
DstFolderID: directoryID,
|
||||
DstFolderID: dstDirectoryID,
|
||||
Move: "true",
|
||||
NewFolderName: leaf,
|
||||
NewFolderName: dstLeaf,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -620,7 +560,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, leaf, directoryID, err
|
||||
}
|
||||
@@ -803,10 +743,6 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// fs.Debugf(nil, "List(%v)", dir)
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1079,7 +1015,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
|
||||
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
|
||||
@@ -152,6 +152,14 @@ type ChecksumFileResult struct {
|
||||
Metadata Item `json:"metadata"`
|
||||
}
|
||||
|
||||
// PubLinkResult is returned from /getfilepublink and /getfolderpublink
|
||||
type PubLinkResult struct {
|
||||
Error
|
||||
LinkID int `json:"linkid"`
|
||||
Link string `json:"link"`
|
||||
LinkCode string `json:"code"`
|
||||
}
|
||||
|
||||
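// For illustration, a /getfilepublink success decodes into PubLinkResult
// roughly like this (values invented for the example):
//
//	{"result": 0, "linkid": 12345, "link": "https://my.pcloud.com/publink/show?code=XZabc", "code": "XZabc"}
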
// UserInfo is returned from /userinfo
type UserInfo struct {
	Error

@@ -42,7 +42,7 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURL = "https://api.pcloud.com"
|
||||
defaultHostname = "api.pcloud.com"
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -51,8 +51,8 @@ var (
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
TokenURL: "https://api.pcloud.com/oauth2_token",
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
@@ -60,17 +60,45 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
+// Update the TokenURL with the actual hostname
+func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
+	oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
+}
+
// Register with Fs
func init() {
+	updateTokenURL(oauthConfig, defaultHostname)
	fs.Register(&fs.RegInfo{
		Name:        "pcloud",
		Description: "Pcloud",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
+			optc := new(Options)
+			err := configstruct.Set(m, optc)
+			if err != nil {
+				fs.Errorf(nil, "Failed to read config: %v", err)
+			}
+			updateTokenURL(oauthConfig, optc.Hostname)
+			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
+				if auth == nil || auth.Form == nil {
+					return errors.New("form not found in response")
+				}
+				hostname := auth.Form.Get("hostname")
+				if hostname == "" {
+					hostname = defaultHostname
+				}
+				// Save the hostname in the config
+				m.Set("hostname", hostname)
+				// Update the token URL
+				updateTokenURL(oauthConfig, hostname)
+				fs.Debugf(nil, "pcloud: got hostname %q", hostname)
+				return nil
+			}
			opt := oauthutil.Options{
+				CheckAuth:    checkAuth,
				StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
			}
-			err := oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
+			err = oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
@@ -96,6 +124,13 @@ func init() {
			Help:     "Fill in for rclone to use a non root folder as its starting point.",
			Default:  "d0",
			Advanced: true,
		}, {
+			Name: "hostname",
+			Help: `Hostname to connect to.
+
+This is normally set when rclone initially does the oauth connection.`,
+			Default:  defaultHostname,
+			Advanced: true,
		}},
	})
}
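// Usage sketch: once a hostname has been saved by checkAuth, every later run
// rebuilds the token URL from it (hostname value illustrative):
//
//	updateTokenURL(oauthConfig, "eapi.pcloud.com")
//	// oauthConfig.Endpoint.TokenURL == "https://eapi.pcloud.com/oauth2_token"
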
@@ -104,6 +139,7 @@ func init() {
|
||||
type Options struct {
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
Hostname string `config:"hostname"`
|
||||
}
|
||||
|
||||
// Fs represents a remote pcloud
|
||||
@@ -198,7 +234,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
@@ -253,12 +289,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Pcloud")
|
||||
}
|
||||
updateTokenURL(oauthConfig, opt.Hostname)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
@@ -455,10 +492,6 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -499,7 +532,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||
// Create the directory for the object if it doesn't exist
|
||||
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -530,13 +563,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
err := f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
}
|
||||
_, err := f.dirCache.FindDir(ctx, dir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -548,10 +575,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
dc := f.dirCache
|
||||
err := dc.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootID, err := dc.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -659,7 +682,7 @@ func (f *Fs) Purge(ctx context.Context) error {
|
||||
|
||||
// CleanUp empties the trash
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
err := f.dirCache.FindRoot(ctx, false)
|
||||
rootID, err := f.dirCache.RootID(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -668,7 +691,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
Path: "/trash_clear",
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(f.dirCache.RootID()))
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
|
||||
var resp *http.Response
|
||||
var result api.Error
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
@@ -741,58 +764,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Refuse to move to or from the root
|
||||
if srcPath == "" || dstPath == "" {
|
||||
fs.Debugf(src, "DirMove error: Can't move root")
|
||||
return errors.New("can't move root directory")
|
||||
}
|
||||
|
||||
// find the root src directory
|
||||
err := srcFs.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the root dst directory
|
||||
if dstRemote != "" {
|
||||
err = f.dirCache.FindRoot(ctx, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if f.dirCache.FoundRoot() {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of dst parent, creating subdirs if necessary
|
||||
var leaf, directoryID string
|
||||
findPath := dstRemote
|
||||
if dstRemote == "" {
|
||||
findPath = f.root
|
||||
}
|
||||
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -804,8 +777,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
|
||||
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
|
||||
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(dstLeaf))
|
||||
opts.Parameters.Set("tofolderid", dirIDtoNumber(dstDirectoryID))
|
||||
var resp *http.Response
|
||||
var result api.ItemResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
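The hunks above replace each backend's hand-rolled root and destination checks with one call into the shared dircache package. A minimal sketch of how a backend DirMove now reads, assuming the helper's signature as it appears in this diff (the five returned strings are source ID, source parent ID, source leaf, destination parent ID and destination leaf); the FlushDir call at the end is an assumed cache-invalidation step, the exact follow-up differs per backend:

    // DirMove moves srcRemote (in srcFs) to dstRemote (in f). The dircache
    // helper validates the roots, creates the destination parent if needed
    // and refuses to overwrite an existing destination directory.
    func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
        srcFs, ok := src.(*Fs)
        if !ok {
            return fs.ErrorCantDirMove
        }
        srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
        if err != nil {
            return err
        }
        // backend-specific move request goes here, built from srcID,
        // dstLeaf and dstDirectoryID
        srcFs.dirCache.FlushDir(srcRemote) // assumed: drop the stale cache entry
        return nil
    }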
@@ -827,6 +800,61 @@ func (f *Fs) DirCacheFlush() {
    f.dirCache.ResetRoot()
}

+func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (string, error) {
+   opts := rest.Opts{
+       Method:     "POST",
+       Path:       "/getfolderpublink",
+       Parameters: url.Values{},
+   }
+   var result api.PubLinkResult
+   opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
+   err := f.pacer.Call(func() (bool, error) {
+       resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+       err = result.Error.Update(err)
+       return shouldRetry(resp, err)
+   })
+   if err != nil {
+       return "", err
+   }
+   return result.Link, err
+}
+
+func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
+   opts := rest.Opts{
+       Method:     "POST",
+       Path:       "/getfilepublink",
+       Parameters: url.Values{},
+   }
+   var result api.PubLinkResult
+   opts.Parameters.Set("path", path)
+   err := f.pacer.Call(func() (bool, error) {
+       resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+       err = result.Error.Update(err)
+       return shouldRetry(resp, err)
+   })
+   if err != nil {
+       return "", err
+   }
+   return result.Link, nil
+}
+
+// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
+   err := f.dirCache.FindRoot(ctx, false)
+   if err != nil {
+       return "", err
+   }
+
+   dirID, err := f.dirCache.FindDir(ctx, remote, false)
+   if err == fs.ErrorDirNotFound {
+       return f.linkFile(ctx, remote, expire)
+   }
+   if err != nil {
+       return "", err
+   }
+   return f.linkDir(ctx, dirID, expire)
+}
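Note that both helpers accept an expire parameter but, as written, never forward it to the pCloud API; only the folder ID or path is sent. They do follow rclone's usual pacer pattern, where the error field carried in the response body is folded into the Go error before deciding whether to retry. A minimal sketch of that pattern, names as in the hunk above:

    // result.Error.Update(err) yields the API's in-body error when the
    // HTTP call itself succeeded; shouldRetry then maps retryable errors
    // to (true, err) so the pacer sleeps and re-invokes the callback.
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
        err = result.Error.Update(err)
        return shouldRetry(resp, err)
    })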

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    opts := rest.Opts{

@@ -1056,7 +1084,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    remote := o.Remote()

    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return err
    }

@@ -1161,6 +1189,7 @@ var (
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
+   _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.IDer            = (*Object)(nil)

@@ -183,7 +183,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
    // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound

@@ -450,10 +450,6 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-   err = f.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return nil, err
-   }
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err

@@ -493,7 +489,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return
    }

@@ -544,13 +540,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-   err := f.dirCache.FindRoot(ctx, true)
-   if err != nil {
-       return err
-   }
-   if dir != "" {
-       _, err = f.dirCache.FindDir(ctx, dir, true)
-   }
+   _, err := f.dirCache.FindDir(ctx, dir, true)
    return err
}

@@ -562,10 +552,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
        return errors.New("can't purge root directory")
    }
    dc := f.dirCache
-   err := dc.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
    rootID, err := dc.FindDir(ctx, dir, false)
    if err != nil {
        return err

@@ -745,75 +731,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
-   srcPath := path.Join(srcFs.root, srcRemote)
-   dstPath := path.Join(f.root, dstRemote)
-
-   // Refuse to move to or from the root
-   if srcPath == "" || dstPath == "" {
-       fs.Debugf(src, "DirMove error: Can't move root")
-       return errors.New("can't move root directory")
-   }
-
-   // find the root src directory
-   err := srcFs.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
-
-   // find the root dst directory
-   if dstRemote != "" {
-       err = f.dirCache.FindRoot(ctx, true)
-       if err != nil {
-           return err
-       }
-   } else {
-       if f.dirCache.FoundRoot() {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of dst parent, creating subdirs if necessary
-   var leaf, directoryID string
-   findPath := dstRemote
-   if dstRemote == "" {
-       findPath = f.root
-   }
-   leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
-   if err != nil {
-       return err
-   }
-
-   // Check destination does not exist
-   if dstRemote != "" {
-       _, err = f.dirCache.FindDir(ctx, dstRemote, false)
-       if err == fs.ErrorDirNotFound {
-           // OK
-       } else if err != nil {
-           return err
-       } else {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of src
-   srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
-   if err != nil {
-       return err
-   }
-
-   // Find ID of src parent, not creating subdirs
-   var srcLeaf, srcDirectoryID string
-   findPath = srcRemote
-   if srcRemote == "" {
-       findPath = srcFs.root
-   }
-   srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
+   srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
    if err != nil {
        return err
    }

    // Do the move
-   err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID)
+   err = f.move(ctx, false, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
    if err != nil {
        return err
    }

@@ -822,7 +747,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    _, err := f.dirCache.FindDir(ctx, remote, false)
    if err == nil {
        return "", fs.ErrorCantShareDirectories

@@ -990,7 +915,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    size := src.Size()

    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return err
    }

@@ -197,10 +197,6 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
-   err = f.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return nil, err
-   }
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err

@@ -260,7 +256,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
    // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
    size := src.Size()
    remote := src.Remote()
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }

@@ -458,13 +454,7 @@ func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
-   err = f.dirCache.FindRoot(ctx, true)
-   if err != nil {
-       return err
-   }
-   if dir != "" {
-       _, err = f.dirCache.FindDir(ctx, dir, true)
-   }
+   _, err = f.dirCache.FindDir(ctx, dir, true)
    return err
}

@@ -528,12 +518,11 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
    if f.root == "" {
        return errors.New("can't purge root directory")
    }
-   err = f.dirCache.FindRoot(ctx, false)
+   rootIDs, err := f.dirCache.RootID(ctx, false)
    if err != nil {
        return err
    }

-   rootID := atoi(f.dirCache.RootID())
+   rootID := atoi(rootIDs)
    // Let putio delete the filesystem tree
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "deleting file: %d", rootID)

@@ -559,7 +548,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
    if !ok {
        return nil, fs.ErrorCantCopy
    }
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }

@@ -598,7 +587,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
    if !ok {
        return nil, fs.ErrorCantMove
    }
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }

@@ -636,57 +625,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    if !ok {
        return fs.ErrorCantDirMove
    }
-   srcPath := path.Join(srcFs.root, srcRemote)
-   dstPath := path.Join(f.root, dstRemote)
-
-   // Refuse to move to or from the root
-   if srcPath == "" || dstPath == "" {
-       return errors.New("can't move root directory")
-   }
-
-   // find the root src directory
-   err = srcFs.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
-
-   // find the root dst directory
-   if dstRemote != "" {
-       err = f.dirCache.FindRoot(ctx, true)
-       if err != nil {
-           return err
-       }
-   } else {
-       if f.dirCache.FoundRoot() {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of dst parent, creating subdirs if necessary
-   var leaf, dstDirectoryID string
-   findPath := dstRemote
-   if dstRemote == "" {
-       findPath = f.root
-   }
-   leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
-   if err != nil {
-       return err
-   }
-
-   // Check destination does not exist
-   if dstRemote != "" {
-       _, err = f.dirCache.FindDir(ctx, dstRemote, false)
-       if err == fs.ErrorDirNotFound {
-           // OK
-       } else if err != nil {
-           return err
-       } else {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of src
-   srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+   srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
    if err != nil {
        return err
    }

@@ -695,7 +635,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    params := url.Values{}
    params.Set("file_id", srcID)
    params.Set("parent_id", dstDirectoryID)
-   params.Set("name", f.opt.Enc.FromStandardName(leaf))
+   params.Set("name", f.opt.Enc.FromStandardName(dstLeaf))
    req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
    if err != nil {
        return false, err

@@ -125,7 +125,7 @@ func (o *Object) setMetadataFromEntry(info putio.File) error {
// Reads the entry for a file from putio
func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
    // defer log.Trace(o, "")("f=%+v, err=%v", f, &err)
-   leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
+   leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound

@@ -248,7 +248,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        _ = resp.Body.Close()
        return nil, fserrors.NoRetryError(err)
    }
-   return resp.Body, err
+   if err != nil {
+       return nil, err
+   }
+   return resp.Body, nil
}

// Update the already existing object

@@ -356,6 +356,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
+       SlowModTime:       true,
    }).Fill(f)

    if f.rootBucket != "" && f.rootDirectory != "" {

@@ -15,6 +15,7 @@ import (

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/lib/atexit"
    qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

@@ -346,12 +347,15 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
    if err = mu.initiate(); err != nil {
        return err
    }
-   defer func() {
-       // Abort the transfer if returning an error
-       if err != nil {
-           _ = mu.abort()
-       }
-   }()
+   // Cancel the session if something went wrong
+   defer atexit.OnError(&err, func() {
+       fs.Debugf(mu, "Cancelling multipart upload: %v", err)
+       cancelErr := mu.abort()
+       if cancelErr != nil {
+           fs.Logf(mu, "Failed to cancel multipart upload: %v", cancelErr)
+       }
+   })()

    ch := make(chan chunk, mu.cfg.concurrency)
    for i := 0; i < mu.cfg.concurrency; i++ {
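The qingstor change swaps a plain deferred abort for the lib/atexit OnError helper, so the multipart session is also cleaned up when rclone is interrupted, not only when the function returns an error. A minimal usage sketch, assuming OnError registers the callback to fire when *err is non-nil (or on exit) and returns a func to defer:

    func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
        if err = mu.initiate(); err != nil {
            return err
        }
        // runs mu.abort() on any error return and on Ctrl-C
        defer atexit.OnError(&err, func() {
            if cancelErr := mu.abort(); cancelErr != nil {
                fs.Logf(mu, "Failed to cancel multipart upload: %v", cancelErr)
            }
        })()
        // ... upload the parts, then complete the session ...
        return nil
    }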
backend/s3/s3.go

@@ -40,6 +40,7 @@ import (
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/defaults"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
+   "github.com/aws/aws-sdk-go/aws/endpoints"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"

@@ -52,13 +53,16 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
+   "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
+   "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/pool"
    "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/lib/rest"
+   "github.com/rclone/rclone/lib/structs"
    "golang.org/x/sync/errgroup"
)

@@ -68,6 +72,7 @@ func init() {
        Name:        "s3",
        Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
        NewFs:       NewFs,
+       CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name: fs.ConfigProvider,
            Help: "Choose your S3 provider.",

@@ -95,6 +100,9 @@ func init() {
        }, {
            Value: "Netease",
            Help:  "Netease Object Storage (NOS)",
+       }, {
+           Value: "Scaleway",
+           Help:  "Scaleway Object Storage",
        }, {
            Value: "StackPath",
            Help:  "StackPath Object Storage",

@@ -175,10 +183,21 @@ func init() {
            Value: "sa-east-1",
            Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
        }},
+   }, {
+       Name:     "region",
+       Help:     "Region to connect to.",
+       Provider: "Scaleway",
+       Examples: []fs.OptionExample{{
+           Value: "nl-ams",
+           Help:  "Amsterdam, The Netherlands",
+       }, {
+           Value: "fr-par",
+           Help:  "Paris, France",
+       }},
    }, {
        Name:     "region",
        Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
-       Provider: "!AWS,Alibaba",
+       Provider: "!AWS,Alibaba,Scaleway",
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "Use this if unsure. Will use v4 signatures and an empty region.",

@@ -360,6 +379,17 @@ func init() {
            Value: "oss-me-east-1.aliyuncs.com",
            Help:  "Middle East 1 (Dubai)",
        }},
+   }, {
+       Name:     "endpoint",
+       Help:     "Endpoint for Scaleway Object Storage.",
+       Provider: "Scaleway",
+       Examples: []fs.OptionExample{{
+           Value: "s3.nl-ams.scw.cloud",
+           Help:  "Amsterdam Endpoint",
+       }, {
+           Value: "s3.fr-par.scw.cloud",
+           Help:  "Paris Endpoint",
+       }},
    }, {
        Name:     "endpoint",
        Help:     "Endpoint for StackPath Object Storage.",

@@ -377,7 +407,7 @@ func init() {
    }, {
        Name:     "endpoint",
        Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
-       Provider: "!AWS,IBMCOS,Alibaba,StackPath",
+       Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
        Examples: []fs.OptionExample{{
            Value: "objects-us-east-1.dream.io",
            Help:  "Dream Objects endpoint",

@@ -564,7 +594,7 @@ func init() {
    }, {
        Name:     "location_constraint",
        Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
-       Provider: "!AWS,IBMCOS,Alibaba,StackPath",
+       Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
    }, {
        Name: "acl",
        Help: `Canned ACL used when creating buckets and storing or copying objects.

@@ -740,6 +770,21 @@ isn't set then "acl" is used instead.`,
            Value: "STANDARD_IA",
            Help:  "Infrequent access storage mode.",
        }},
+   }, {
+       // Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
+       Name:     "storage_class",
+       Help:     "The storage class to use when storing new objects in S3.",
+       Provider: "Scaleway",
+       Examples: []fs.OptionExample{{
+           Value: "",
+           Help:  "Default",
+       }, {
+           Value: "STANDARD",
+           Help:  "The Standard class for any upload; suitable for on-demand content like streaming or CDN.",
+       }, {
+           Value: "GLACIER",
+           Help:  "Archived storage; prices are lower, but it needs to be restored first to be accessed.",
+       }},
    }, {
        Name: "upload_cutoff",
        Help: `Cutoff for switching to chunked upload

@@ -773,6 +818,21 @@ file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
        Default:  minChunkSize,
        Advanced: true,
+   }, {
+       Name: "max_upload_parts",
+       Help: `Maximum number of parts in a multipart upload.
+
+This option defines the maximum number of multipart chunks to use
+when doing a multipart upload.
+
+This can be useful if a service does not support the AWS S3
+specification of 10,000 chunks.
+
+Rclone will automatically increase the chunk size when uploading a
+large file of a known size to stay below this number of chunks limit.
+`,
+       Default:  maxUploadParts,
+       Advanced: true,
    }, {
        Name: "copy_cutoff",
        Help: `Cutoff for switching to multipart copy

@@ -909,6 +969,7 @@ const (

    memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
    memoryPoolUseMmap   = false
+   maxExpireDuration   = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week
)

// Options defines the configuration for this backend

@@ -931,6 +992,7 @@ type Options struct {
    UploadCutoff      fs.SizeSuffix `config:"upload_cutoff"`
    CopyCutoff        fs.SizeSuffix `config:"copy_cutoff"`
    ChunkSize         fs.SizeSuffix `config:"chunk_size"`
+   MaxUploadParts    int64         `config:"max_upload_parts"`
    DisableChecksum   bool          `config:"disable_checksum"`
    SessionToken      string        `config:"session_token"`
    UploadConcurrency int           `config:"upload_concurrency"`

@@ -1126,21 +1188,23 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
        return nil, nil, errors.New("secret_access_key not found")
    }

    if opt.Region == "" && opt.Endpoint == "" {
        opt.Endpoint = "https://s3.amazonaws.com/"
    }
    if opt.Region == "" {
        opt.Region = "us-east-1"
    }
-   if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
+   if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
        opt.ForcePathStyle = false
    }
+   if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
+       opt.MaxUploadParts = 1000
+   }
    awsConfig := aws.NewConfig().
        WithMaxRetries(0). // Rely on rclone's retry logic
        WithCredentials(cred).
        WithHTTPClient(fshttp.NewClient(fs.Config)).
        WithS3ForcePathStyle(opt.ForcePathStyle).
-       WithS3UseAccelerate(opt.UseAccelerateEndpoint)
+       WithS3UseAccelerate(opt.UseAccelerateEndpoint).
+       WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)

    if opt.Region != "" {
        awsConfig.WithRegion(opt.Region)
    }

@@ -1267,6 +1331,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        BucketBasedRootOK: true,
        SetTier:           true,
        GetTier:           true,
+       SlowModTime:       true,
    }).Fill(f)
    if f.rootBucket != "" && f.rootDirectory != "" {
        // Check to see if the object exists

@@ -1839,21 +1904,19 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
    }
    uid := cout.UploadId

-   defer func() {
-       if err != nil {
-           // We can try to abort the upload, but ignore the error.
-           fs.Debugf(nil, "Cancelling multipart copy")
-           _ = f.pacer.Call(func() (bool, error) {
-               _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
-                   Bucket:       &dstBucket,
-                   Key:          &dstPath,
-                   UploadId:     uid,
-                   RequestPayer: req.RequestPayer,
-               })
-               return f.shouldRetry(err)
-           })
-       }
-   }()
+   defer atexit.OnError(&err, func() {
+       // Try to abort the upload, but ignore the error.
+       fs.Debugf(nil, "Cancelling multipart copy")
+       _ = f.pacer.Call(func() (bool, error) {
+           _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
+               Bucket:       &dstBucket,
+               Key:          &dstPath,
+               UploadId:     uid,
+               RequestPayer: req.RequestPayer,
+           })
+           return f.shouldRetry(err)
+       })
+   })()

    partSize := int64(f.opt.CopyCutoff)
    numParts := (srcSize-1)/partSize + 1

@@ -1959,6 +2022,147 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
    )
}

+// PublicLink generates a public link to the remote path (usually readable by anyone)
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
+   if strings.HasSuffix(remote, "/") {
+       return "", fs.ErrorCantShareDirectories
+   }
+   if _, err := f.NewObject(ctx, remote); err != nil {
+       return "", err
+   }
+   if expire > maxExpireDuration {
+       fs.Logf(f, "Public Link: Reducing expiry to %v as %v is greater than the max time allowed", maxExpireDuration, expire)
+       expire = maxExpireDuration
+   }
+   bucket, bucketPath := f.split(remote)
+   httpReq, _ := f.c.GetObjectRequest(&s3.GetObjectInput{
+       Bucket: &bucket,
+       Key:    &bucketPath,
+   })
+
+   return httpReq.Presign(time.Duration(expire))
+}
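PublicLink here is built on the SDK's request presigner: construct a GetObject request, then sign it for a bounded lifetime instead of executing it. A standalone sketch using aws-sdk-go (v1), with the service client, bucket and key as illustrative placeholders:

    // svc is a *s3.S3 built from an aws session (assumed set up elsewhere)
    req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("my-bucket"),
        Key:    aws.String("path/to/object"),
    })
    // Presign returns a URL valid for the given duration; SigV4 caps this
    // at 7 days, which is why maxExpireDuration above is one week.
    url, err := req.Presign(24 * time.Hour)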
+var commandHelp = []fs.CommandHelp{{
+   Name:  "restore",
+   Short: "Restore objects from GLACIER to normal storage",
+   Long: `This command can be used to restore one or more objects from GLACIER
+to normal storage.
+
+Usage Examples:
+
+    rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
+    rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
+    rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
+
+This flag also obeys the filters. Test first with -i/--interactive or --dry-run flags
+
+    rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+
+All the objects shown will be marked for restore, then
+
+    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+
+It returns a list of status dictionaries with Remote and Status
+keys. The Status will be OK if it was successfull or an error message
+if not.
+
+    [
+        {
+            "Status": "OK",
+            "Path": "test.txt"
+        },
+        {
+            "Status": "OK",
+            "Path": "test/file4.txt"
+        }
+    ]
+
+`,
+   Opts: map[string]string{
+       "priority":    "Priority of restore: Standard|Expedited|Bulk",
+       "lifetime":    "Lifetime of the active copy in days",
+       "description": "The optional description for the job.",
+   },
+}}
+
+// Command the backend to run a named command
+//
+// The command run is name
+// args may be used to read arguments from
+// opts may be used to read optional arguments from
+//
+// The result should be capable of being JSON encoded
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+   switch name {
+   case "restore":
+       req := s3.RestoreObjectInput{
+           //Bucket: &f.rootBucket,
+           //Key: &encodedDirectory,
+           RestoreRequest: &s3.RestoreRequest{},
+       }
+       if lifetime := opt["lifetime"]; lifetime != "" {
+           ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
+           if err != nil {
+               return nil, errors.Wrap(err, "bad lifetime")
+           }
+           req.RestoreRequest.Days = &ilifetime
+       }
+       if priority := opt["priority"]; priority != "" {
+           req.RestoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{
+               Tier: &priority,
+           }
+       }
+       if description := opt["description"]; description != "" {
+           req.RestoreRequest.Description = &description
+       }
+       type status struct {
+           Status string
+           Remote string
+       }
+       var (
+           outMu sync.Mutex
+           out   = []status{}
+       )
+       err = operations.ListFn(ctx, f, func(obj fs.Object) {
+           // Remember this is run --checkers times concurrently
+           o, ok := obj.(*Object)
+           st := status{Status: "OK", Remote: obj.Remote()}
+           defer func() {
+               outMu.Lock()
+               out = append(out, st)
+               outMu.Unlock()
+           }()
+           if operations.SkipDestructive(ctx, obj, "restore") {
+               return
+           }
+           if !ok {
+               st.Status = "Not an S3 object"
+               return
+           }
+           bucket, bucketPath := o.split()
+           reqCopy := req
+           reqCopy.Bucket = &bucket
+           reqCopy.Key = &bucketPath
+           err = f.pacer.Call(func() (bool, error) {
+               _, err = f.c.RestoreObject(&reqCopy)
+               return f.shouldRetry(err)
+           })
+           if err != nil {
+               st.Status = err.Error()
+           }
+       })
+       if err != nil {
+           return out, err
+       }
+       return out, nil
+   default:
+       return nil, fs.ErrorCommandNotFound
+   }
+}

// ------------------------------------------------------------

// Fs returns the parent Fs

@@ -2033,11 +2237,17 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
    if err != nil {
        if awsErr, ok := err.(awserr.RequestFailure); ok {
            if awsErr.StatusCode() == http.StatusNotFound {
+               // NotFound indicates bucket was OK
+               // NoSuchBucket would be returned if bucket was bad
+               if awsErr.Code() == "NotFound" {
+                   o.fs.cache.MarkOK(bucket)
+               }
                return fs.ErrorObjectNotFound
            }
        }
        return err
    }
+   o.fs.cache.MarkOK(bucket)
    var size int64
    // Ignore missing Content-Length assuming it is 0
    // Some versions of ceph do this due their apache proxies

@@ -2177,6 +2387,13 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    }
    tokens := pacer.NewTokenDispenser(concurrency)

+   uploadParts := f.opt.MaxUploadParts
+   if uploadParts < 1 {
+       uploadParts = 1
+   } else if uploadParts > maxUploadParts {
+       uploadParts = maxUploadParts
+   }
+
    // calculate size of parts
    partSize := int(f.opt.ChunkSize)

@@ -2186,31 +2403,24 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    if size == -1 {
        warnStreamUpload.Do(func() {
            fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
-               f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
+               f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*uploadParts))
        })
    } else {
        // Adjust partSize until the number of parts is small enough.
-       if size/int64(partSize) >= maxUploadParts {
+       if size/int64(partSize) >= uploadParts {
            // Calculate partition size rounded up to the nearest MB
-           partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
+           partSize = int((((size / uploadParts) >> 20) + 1) << 20)
        }
    }

    memPool := f.getMemoryPool(int64(partSize))

+   var mReq s3.CreateMultipartUploadInput
+   structs.SetFrom(&mReq, req)
    var cout *s3.CreateMultipartUploadOutput
    err = f.pacer.Call(func() (bool, error) {
        var err error
-       cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
-           Bucket:               req.Bucket,
-           ACL:                  req.ACL,
-           Key:                  req.Key,
-           ContentType:          req.ContentType,
-           Metadata:             req.Metadata,
-           ServerSideEncryption: req.ServerSideEncryption,
-           SSEKMSKeyId:          req.SSEKMSKeyId,
-           StorageClass:         req.StorageClass,
-       })
+       cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
        return f.shouldRetry(err)
    })
    if err != nil {
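The clamp and the rounding above interact: for a known-size upload the part size is grown until the file fits in at most uploadParts parts, rounded up to a whole MiB. A worked example for a 100 GiB file against Scaleway's 1000-part cap:

    package main

    import "fmt"

    func main() {
        size := int64(100) << 30   // 100 GiB
        uploadParts := int64(1000) // Scaleway's cap, set in s3Connection
        partSize := (((size / uploadParts) >> 20) + 1) << 20
        fmt.Printf("%d MiB\n", partSize>>20) // 103 MiB: 1000 parts of 103 MiB >= 100 GiB
    }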
@@ -2218,27 +2428,24 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    }
    uid := cout.UploadId

-   defer func() {
+   defer atexit.OnError(&err, func() {
        if o.fs.opt.LeavePartsOnError {
            return
        }
-       if err != nil {
-           // We can try to abort the upload, but ignore the error.
-           fs.Debugf(o, "Cancelling multipart upload")
-           errCancel := f.pacer.Call(func() (bool, error) {
-               _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
-                   Bucket:       req.Bucket,
-                   Key:          req.Key,
-                   UploadId:     uid,
-                   RequestPayer: req.RequestPayer,
-               })
-               return f.shouldRetry(err)
-           })
-           if errCancel != nil {
-               fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
-           }
-       }
-   }()
+       fs.Debugf(o, "Cancelling multipart upload")
+       errCancel := f.pacer.Call(func() (bool, error) {
+           _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
+               Bucket:       req.Bucket,
+               Key:          req.Key,
+               UploadId:     uid,
+               RequestPayer: req.RequestPayer,
+           })
+           return f.shouldRetry(err)
+       })
+       if errCancel != nil {
+           fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
+       }
+   })()

    var (
        g, gCtx = errgroup.WithContext(ctx)

@@ -2423,6 +2630,35 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    if o.fs.opt.StorageClass != "" {
        req.StorageClass = &o.fs.opt.StorageClass
    }
+   // Apply upload options
+   for _, option := range options {
+       key, value := option.Header()
+       lowerKey := strings.ToLower(key)
+       switch lowerKey {
+       case "":
+           // ignore
+       case "cache-control":
+           req.CacheControl = aws.String(value)
+       case "content-disposition":
+           req.ContentDisposition = aws.String(value)
+       case "content-encoding":
+           req.ContentEncoding = aws.String(value)
+       case "content-language":
+           req.ContentLanguage = aws.String(value)
+       case "content-type":
+           req.ContentType = aws.String(value)
+       case "x-amz-tagging":
+           req.Tagging = aws.String(value)
+       default:
+           const amzMetaPrefix = "x-amz-meta-"
+           if strings.HasPrefix(lowerKey, amzMetaPrefix) {
+               metaKey := lowerKey[len(amzMetaPrefix):]
+               req.Metadata[metaKey] = aws.String(value)
+           } else {
+               fs.Errorf(o, "Don't know how to set key %q on upload", key)
+           }
+       }
+   }

    if multipart {
        err = o.uploadMultipart(ctx, &req, size, in)
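The new loop lets generic upload options set fields on the S3 request directly. Assuming rclone's fs.HTTPOption carries a header key/value pair exposed via Header(), a Cache-Control header supplied at upload time reaches PutObjectInput.CacheControl like so (a small sketch of the mapping, names as in the hunk):

    opt := fs.HTTPOption{Key: "Cache-Control", Value: "max-age=3600"}
    key, value := opt.Header()
    if strings.ToLower(key) == "cache-control" {
        req.CacheControl = aws.String(value) // mirrors the switch above
    }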
@@ -2463,18 +2699,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    httpReq.Header = headers
    httpReq.ContentLength = size

-   for _, option := range options {
-       switch option.(type) {
-       case *fs.HTTPOption:
-           key, value := option.Header()
-           httpReq.Header.Add(key, value)
-       default:
-           if option.Mandatory() {
-               fs.Logf(o, "Unsupported mandatory option: %v", option)
-           }
-       }
-   }
-
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := o.fs.srv.Do(httpReq)
        if err != nil {

@@ -2556,6 +2780,7 @@ var (
    _ fs.Copier      = &Fs{}
    _ fs.PutStreamer = &Fs{}
    _ fs.ListRer     = &Fs{}
+   _ fs.Commander   = &Fs{}
    _ fs.Object      = &Object{}
    _ fs.MimeTyper   = &Object{}
    _ fs.GetTierer   = &Object{}

@@ -972,7 +972,7 @@ func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
// ==================== Optional Interface fs.PublicLinker ====================

// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    libraryName, filePath := f.splitPath(remote)
    if libraryName == "" {
        // We cannot share the whole seafile server, we need at least a library

@@ -74,7 +74,7 @@ func init() {
        Help: "Raw PEM-encoded private key, If specified, will override key_file parameter.",
    }, {
        Name: "key_file",
-       Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
+       Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
    }, {
        Name: "key_file_pass",
        Help: `The passphrase to decrypt the PEM-encoded private key file.

@@ -193,6 +193,7 @@ type Options struct {
type Fs struct {
    name     string
    root     string
+   absRoot  string
    opt      Options          // parsed options
    m        configmap.Mapper // config
    features *fs.Features     // optional features

@@ -395,8 +396,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

    keyFile := env.ShellExpand(opt.KeyFile)
    //keyPem := env.ShellExpand(opt.KeyPem)
-   // Add ssh agent-auth if no password or file specified
-   if (opt.Pass == "" && keyFile == "" && !opt.AskPassword) || opt.KeyUseAgent {
+   // Add ssh agent-auth if no password or file or key PEM specified
+   if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
        sshAgentClient, _, err := sshagent.New()
        if err != nil {
            return nil, errors.Wrap(err, "couldn't connect to ssh-agent")

@@ -491,6 +492,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
    f := &Fs{
        name:    name,
        root:    root,
+       absRoot: root,
        opt:     *opt,
        m:       m,
        config:  sshConfig,

@@ -500,17 +502,27 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
+       SlowHash:                true,
    }).Fill(f)
    // Make a connection and pool it to return errors early
    c, err := f.getSftpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "NewFs")
    }
+   cwd, err := c.sftpClient.Getwd()
    f.putSftpConnection(&c, nil)
+   if err != nil {
+       fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
+   } else if !path.IsAbs(f.root) {
+       f.absRoot = path.Join(cwd, f.root)
+       fs.Debugf(f, "Using absolute root directory %q", f.absRoot)
+   }
    if root != "" {
        // Check to see if the root actually an existing file
+       oldAbsRoot := f.absRoot
        remote := path.Base(root)
        f.root = path.Dir(root)
+       f.absRoot = path.Dir(f.absRoot)
        if f.root == "." {
            f.root = ""
        }

@@ -519,6 +531,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
        if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
            // File doesn't exist so return old f
            f.root = root
+           f.absRoot = oldAbsRoot
            return f, nil
        }
        return nil, err
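The sftp backend now resolves a relative configured root against the server's working directory once, at connection time, and uses the absolute form wherever paths are joined. The resolution itself is just:

    cwd, err := c.sftpClient.Getwd() // e.g. "/home/user"
    if err == nil && !path.IsAbs(f.root) {
        f.absRoot = path.Join(cwd, f.root) // "backups" -> "/home/user/backups"
    }

so the hunks that follow can switch Mkdir, List, Move and friends from f.root to f.absRoot without touching their own logic.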
@@ -601,7 +614,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-   root := path.Join(f.root, dir)
+   root := path.Join(f.absRoot, dir)
    ok, err := f.dirExists(root)
    if err != nil {
        return nil, errors.Wrap(err, "List failed")

@@ -682,7 +695,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// directories above that
func (f *Fs) mkParentDir(remote string) error {
    parent := path.Dir(remote)
-   return f.mkdir(path.Join(f.root, parent))
+   return f.mkdir(path.Join(f.absRoot, parent))
}

// mkdir makes the directory and parents using native paths

@@ -718,7 +731,7 @@ func (f *Fs) mkdir(dirPath string) error {

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-   root := path.Join(f.root, dir)
+   root := path.Join(f.absRoot, dir)
    return f.mkdir(root)
}

@@ -734,7 +747,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        return fs.ErrorDirectoryNotEmpty
    }
    // Remove the directory
-   root := path.Join(f.root, dir)
+   root := path.Join(f.absRoot, dir)
    c, err := f.getSftpConnection()
    if err != nil {
        return errors.Wrap(err, "Rmdir")

@@ -761,7 +774,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }
    err = c.sftpClient.Rename(
        srcObj.path(),
-       path.Join(f.root, remote),
+       path.Join(f.absRoot, remote),
    )
    f.putSftpConnection(&c, err)
    if err != nil {

@@ -788,8 +801,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
-   srcPath := path.Join(srcFs.root, srcRemote)
-   dstPath := path.Join(f.root, dstRemote)
+   srcPath := path.Join(srcFs.absRoot, srcRemote)
+   dstPath := path.Join(f.absRoot, dstRemote)

    // Check if destination exists
    ok, err := f.dirExists(dstPath)

@@ -1075,7 +1088,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// path returns the native path of the object
func (o *Object) path() string {
-   return path.Join(o.fs.root, o.remote)
+   return path.Join(o.fs.absRoot, o.remote)
}

// setMetadata updates the info in the object from the stat result passed in

@@ -1091,7 +1104,7 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
    if err != nil {
        return nil, errors.Wrap(err, "stat")
    }
-   absPath := path.Join(f.root, remote)
+   absPath := path.Join(f.absRoot, remote)
    info, err = c.sftpClient.Stat(absPath)
    f.putSftpConnection(&c, err)
    return info, err

@@ -350,7 +350,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, id string, directoriesOnly b
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound

@@ -700,10 +700,6 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-   err = f.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return nil, err
-   }
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err

@@ -743,7 +739,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return
    }

@@ -799,13 +795,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-   err := f.dirCache.FindRoot(ctx, true)
-   if err != nil {
-       return err
-   }
-   if dir != "" {
-       _, err = f.dirCache.FindDir(ctx, dir, true)
-   }
+   _, err := f.dirCache.FindDir(ctx, dir, true)
    return err
}

@@ -817,10 +807,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
        return errors.New("can't purge root directory")
    }
    dc := f.dirCache
-   err := dc.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
    rootID, err := dc.FindDir(ctx, dir, false)
    if err != nil {
        return err

@@ -1033,75 +1019,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
-   srcPath := path.Join(srcFs.root, srcRemote)
-   dstPath := path.Join(f.root, dstRemote)
-
-   // Refuse to move to or from the root
-   if srcPath == "" || dstPath == "" {
-       fs.Debugf(src, "DirMove error: Can't move root")
-       return errors.New("can't move root directory")
-   }
-
-   // find the root src directory
-   err := srcFs.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
-
-   // find the root dst directory
-   if dstRemote != "" {
-       err = f.dirCache.FindRoot(ctx, true)
-       if err != nil {
-           return err
-       }
-   } else {
-       if f.dirCache.FoundRoot() {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of dst parent, creating subdirs if necessary
-   var leaf, directoryID string
-   findPath := dstRemote
-   if dstRemote == "" {
-       findPath = f.root
-   }
-   leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
-   if err != nil {
-       return err
-   }
-
-   // Check destination does not exist
-   if dstRemote != "" {
-       _, err = f.dirCache.FindDir(ctx, dstRemote, false)
-       if err == fs.ErrorDirNotFound {
-           // OK
-       } else if err != nil {
-           return err
-       } else {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of src
-   srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
-   if err != nil {
-       return err
-   }
-
-   // Find ID of src parent, not creating subdirs
-   var srcLeaf, srcDirectoryID string
-   findPath = srcRemote
-   if srcRemote == "" {
-       findPath = srcFs.root
-   }
-   srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
+   srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
    if err != nil {
        return err
    }

    // Do the move
-   _, err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID)
+   _, err = f.move(ctx, false, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
    if err != nil {
        return err
    }

@@ -1402,7 +1327,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    isLargeFile := size < 0 || size > int64(o.fs.opt.UploadCutoff)

    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return err
    }

@@ -255,7 +255,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) {
    // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-   leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
+   leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound

@@ -663,10 +663,6 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-   err = f.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return nil, err
-   }
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err

@@ -709,7 +705,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return
    }

@@ -765,13 +761,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-   err := f.dirCache.FindRoot(ctx, true)
-   if err != nil {
-       return err
-   }
-   if dir != "" {
-       _, err = f.dirCache.FindDir(ctx, dir, true)
-   }
+   _, err := f.dirCache.FindDir(ctx, dir, true)
    return err
}

@@ -807,10 +797,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
        return errors.New("can't purge root directory")
    }
    dc := f.dirCache
-   err := dc.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
    directoryID, err := dc.FindDir(ctx, dir, false)
    if err != nil {
        return err

@@ -1033,64 +1019,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
-   srcPath := path.Join(srcFs.root, srcRemote)
-   dstPath := path.Join(f.root, dstRemote)
-
-   // Refuse to move to or from the root
-   if srcPath == "" || dstPath == "" {
-       fs.Debugf(src, "DirMove error: Can't move root")
-       return errors.New("can't move root directory")
-   }
-
-   // find the root src directory
-   err := srcFs.dirCache.FindRoot(ctx, false)
-   if err != nil {
-       return err
-   }
-
-   // find the root dst directory
-   if dstRemote != "" {
-       err = f.dirCache.FindRoot(ctx, true)
-       if err != nil {
-           return err
-       }
-   } else {
-       if f.dirCache.FoundRoot() {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of dst parent, creating subdirs if necessary
-   var leaf, directoryID string
-   findPath := dstRemote
-   if dstRemote == "" {
-       findPath = f.root
-   }
-   leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
-   if err != nil {
-       return err
-   }
-
-   // Check destination does not exist
-   if dstRemote != "" {
-       _, err = f.dirCache.FindDir(ctx, dstRemote, false)
-       if err == fs.ErrorDirNotFound {
-           // OK
-       } else if err != nil {
-           return err
-       } else {
-           return fs.ErrorDirExists
-       }
-   }
-
-   // Find ID of src
-   srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+   srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
    if err != nil {
        return err
    }

    // Do the move
-   err = f.moveDir(ctx, srcID, leaf, directoryID)
+   err = f.moveDir(ctx, srcID, dstLeaf, dstDirectoryID)
    if err != nil {
        return err
    }

@@ -1099,7 +1035,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    obj, err := f.NewObject(ctx, remote)
    if err != nil {
        return "", err

@@ -1289,7 +1225,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    remote := o.Remote()

    // Create the directory for the object if it doesn't exist
-   leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
+   leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return err
    }

@@ -447,6 +447,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
+       SlowModTime:       true,
    }).Fill(f)
    if f.rootContainer != "" && f.rootDirectory != "" {
        // Check to see if the object exists - ignoring directory markers

@@ -504,7 +505,15 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
    // making sure we read the full metadata for all 0 byte files.
    // We don't read the metadata for directory marker objects.
    if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
+       info = nil
        err := o.readMetaData() // reads info and headers, returning an error
-       if err != nil {
+       if err == fs.ErrorObjectNotFound {
+           // We have a dangling large object here so just return the original metadata
+           fs.Errorf(o, "dangling large object with no contents")
+       } else if err != nil {
            return nil, err
+       } else {
+           return o, nil
        }
    }
    if info != nil {
        // Set info but not headers

@@ -536,7 +545,7 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
// container to the start.
//
// Set recurse to read sub directories
-func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
+func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
    if prefix != "" && !strings.HasSuffix(prefix, "/") {
        prefix += "/"
    }

@@ -570,7 +579,7 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
        fs.Logf(f, "Odd name received %q", remote)
        continue
    }
-   if remote == prefix {
+   if !includeDirMarkers && remote == prefix {
        // If we have zero length directory markers ending in / then swift
        // will return them in the listing for the directory which causes
        // duplicate directories. Ignore them here.

@@ -593,8 +602,8 @@ type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
-func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, fn addEntryFn) error {
-   err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
+func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
+   err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
        if isDirectory {
            remote = strings.TrimRight(remote, "/")
            d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)

@@ -606,7 +615,7 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
        if err != nil {
            return err
        }
-       if o.Storable() {
+       if includeDirMarkers || o.Storable() {
            err = fn(o)
        }
    }
@@ -624,7 +633,7 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
// List the objects
|
||||
err = f.list(container, directory, prefix, addContainer, false, func(entry fs.DirEntry) error {
|
||||
err = f.list(container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
|
||||
entries = append(entries, entry)
|
||||
return nil
|
||||
})
|
||||
@@ -694,7 +703,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
container, directory := f.split(dir)
|
||||
list := walk.NewListRHelper(callback)
|
||||
listR := func(container, directory, prefix string, addContainer bool) error {
|
||||
return f.list(container, directory, prefix, addContainer, true, func(entry fs.DirEntry) error {
|
||||
return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
|
||||
return list.Add(entry)
|
||||
})
|
||||
}
|
||||
@@ -841,7 +850,7 @@ func (f *Fs) Purge(ctx context.Context) error {
|
||||
go func() {
|
||||
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
|
||||
}()
|
||||
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, func(entry fs.DirEntry) error {
|
||||
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, true, func(entry fs.DirEntry) error {
|
||||
if o, ok := entry.(*Object); ok {
|
||||
toBeDeleted <- o
|
||||
}
|
||||
@@ -1103,7 +1112,7 @@ func min(x, y int64) int64 {
|
||||
// if except is passed in then segments with that prefix won't be deleted
|
||||
func (o *Object) removeSegments(except string) error {
|
||||
segmentsContainer, prefix, err := o.getSegmentsDlo()
|
||||
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
|
||||
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
}
|
||||
@@ -1124,6 +1133,9 @@ func (o *Object) removeSegments(except string) error {
|
||||
// remove the segments container if empty, ignore errors
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ContainerDelete(segmentsContainer)
|
||||
if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
|
||||
return false, err
|
||||
}
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
|
||||
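The swift hunks above thread a new includeDirMarkers flag through every listing path, so that Purge and removeSegments also see the zero-length application/directory marker objects that normal listings suppress, and therefore delete them too. A condensed, self-contained sketch of that filtering rule, using a hypothetical stand-in type rather than the real swift listing types:

	package main

	import "fmt"

	// entry is a hypothetical stand-in for a swift listing result.
	type entry struct {
		name        string
		contentType string
	}

	// keep condenses the rule from the diff: directory markers are hidden
	// from normal listings but kept when purging, so the markers
	// themselves also get deleted.
	func keep(e entry, prefix string, includeDirMarkers bool) bool {
		if !includeDirMarkers && e.name == prefix {
			// zero length directory markers ending in / would otherwise
			// show up as duplicate directories in the listing
			return false
		}
		storable := e.contentType != "application/directory"
		return includeDirMarkers || storable
	}

	func main() {
		marker := entry{name: "dir/", contentType: "application/directory"}
		fmt.Println(keep(marker, "dir/", false)) // false: hidden from listings
		fmt.Println(keep(marker, "dir/", true))  // true: visible to Purge
	}
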
@@ -269,7 +269,9 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)

cfg := uplink.Config{}
cfg := uplink.Config{
UserAgent: "rclone",
}

project, err = cfg.OpenProject(ctx, f.access)
if err != nil {

@@ -226,6 +226,6 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// </d:response>
// </d:multistatus>
type Quota struct {
Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
Used int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
Available string `xml:"DAV: response>propstat>prop>quota-available-bytes"`
Used string `xml:"DAV: response>propstat>prop>quota-used-bytes"`
}

@@ -17,6 +17,7 @@ import (
"net/url"
"os/exec"
"path"
"strconv"
"strings"
"time"

@@ -975,10 +976,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
</D:prop>
</D:propfind>
`))
var q = api.Quota{
Available: -1,
Used: -1,
}
var q api.Quota
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
@@ -989,14 +987,14 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {
usage.Used = fs.NewUsageValue(i)
}
if q.Available >= 0 {
usage.Free = fs.NewUsageValue(q.Available)
if i, err := strconv.ParseInt(q.Available, 10, 64); err == nil && i >= 0 {
usage.Free = fs.NewUsageValue(i)
}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
if usage.Used != nil && usage.Free != nil {
usage.Total = fs.NewUsageValue(*usage.Used + *usage.Free)
}
return usage, nil
}

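The webdav change above switches the quota fields from int64 to string because some servers return empty or non-numeric quota elements, which broke XML unmarshalling outright; parsing is now deferred to About, where a bad value simply leaves that usage field unset. A runnable sketch of the tolerant parse, using only the standard library:

	package main

	import (
		"fmt"
		"strconv"
	)

	// parseQuota returns (value, true) only for a non-negative integer,
	// treating "", garbage and negative sentinels as "unknown".
	func parseQuota(s string) (int64, bool) {
		i, err := strconv.ParseInt(s, 10, 64)
		if err != nil || i < 0 {
			return 0, false
		}
		return i, true
	}

	func main() {
		for _, s := range []string{"1073741824", "", "-3", "unlimited"} {
			if v, ok := parseQuota(s); ok {
				fmt.Printf("%q -> %d bytes\n", s, v)
			} else {
				fmt.Printf("%q -> unknown\n", s)
			}
		}
	}
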
@@ -73,11 +73,6 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Yandex Client Secret\nLeave blank normally.",
}, {
Name: "unlink",
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -92,9 +87,8 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
Unlink bool `config:"unlink"`
Enc encoder.MultiEncoder `config:"encoding"`
Token string `config:"token"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote yandex
@@ -801,9 +795,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
var path string
if f.opt.Unlink {
if unlink {
path = "/resources/unpublish"
} else {
path = "/resources/publish"
@@ -830,7 +824,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}
}
if err != nil {
if f.opt.Unlink {
if unlink {
return "", errors.Wrap(err, "couldn't remove public link")
}
return "", errors.Wrap(err, "couldn't create public link")

@@ -5,6 +5,7 @@
package main

import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
@@ -20,6 +21,8 @@ import (
"sync"
"text/template"
"time"

"github.com/coreos/go-semver/semver"
)

var (
@@ -37,6 +40,8 @@ var (
)

// GOOS/GOARCH pairs we build for
//
// If the GOARCH contains a - it is a synthetic arch with more parameters
var osarches = []string{
"windows/386",
"windows/amd64",
@@ -45,15 +50,18 @@ var osarches = []string{
"linux/386",
"linux/amd64",
"linux/arm",
"linux/arm-v7",
"linux/arm64",
"linux/mips",
"linux/mipsle",
"freebsd/386",
"freebsd/amd64",
"freebsd/arm",
"freebsd/arm-v7",
"netbsd/386",
"netbsd/amd64",
"netbsd/arm",
"netbsd/arm-v7",
"openbsd/386",
"openbsd/amd64",
"plan9/386",
@@ -66,6 +74,7 @@ var archFlags = map[string][]string{
"386": {"GO386=387"},
"mips": {"GOMIPS=softfloat"},
"mipsle": {"GOMIPS=softfloat"},
"arm-v7": {"GOARM=7"},
}

// runEnv - run a shell command with env
@@ -168,12 +177,118 @@ func buildDebAndRpm(dir, version, goarch string) []string {
return artifacts
}

// generate system object (syso) file to be picked up by a following go build for embedding icon and version info resources into windows executable
func buildWindowsResourceSyso(goarch string, versionTag string) string {
type M map[string]interface{}
version := strings.TrimPrefix(versionTag, "v")
semanticVersion := semver.New(version)

// Build json input to goversioninfo utility
bs, err := json.Marshal(M{
"FixedFileInfo": M{
"FileVersion": M{
"Major": semanticVersion.Major,
"Minor": semanticVersion.Minor,
"Patch": semanticVersion.Patch,
},
"ProductVersion": M{
"Major": semanticVersion.Major,
"Minor": semanticVersion.Minor,
"Patch": semanticVersion.Patch,
},
},
"StringFileInfo": M{
"CompanyName": "https://rclone.org",
"ProductName": "Rclone",
"FileDescription": "Rsync for cloud storage",
"InternalName": "rclone",
"OriginalFilename": "rclone.exe",
"LegalCopyright": "The Rclone Authors",
"FileVersion": version,
"ProductVersion": version,
},
"IconPath": "../graphics/logo/ico/logo_symbol_color.ico",
})
if err != nil {
log.Printf("Failed to build version info json: %v", err)
return ""
}

// Write json to temporary file that will only be used by the goversioninfo command executed below.
jsonPath, err := filepath.Abs("versioninfo_windows_" + goarch + ".json") // Appending goos and goarch as suffix to avoid any race conditions
if err != nil {
log.Printf("Failed to resolve path: %v", err)
return ""
}
err = ioutil.WriteFile(jsonPath, bs, 0644)
if err != nil {
log.Printf("Failed to write %s: %v", jsonPath, err)
return ""
}
defer func() {
if err := os.Remove(jsonPath); err != nil {
if !os.IsNotExist(err) {
log.Printf("Warning: Couldn't remove generated %s: %v. Please remove it manually.", jsonPath, err)
}
}
}()

// Execute goversioninfo utility using the json file as input.
// It will produce a system object (syso) file that a following go build should pick up.
sysoPath, err := filepath.Abs("../resource_windows_" + goarch + ".syso") // Appending goos and goarch as suffix to avoid any race conditions, and also it is recognized by go build and avoids any builds for other systems considering it
if err != nil {
log.Printf("Failed to resolve path: %v", err)
return ""
}
args := []string{
"goversioninfo",
"-o",
sysoPath,
}
if goarch == "amd64" {
args = append(args, "-64") // Make the syso a 64-bit coff file
}
args = append(args, jsonPath)
err = runEnv(args, nil)
if err != nil {
return ""
}

return sysoPath
}

// delete generated system object (syso) resource file
func cleanupResourceSyso(sysoFilePath string) {
if sysoFilePath == "" {
return
}
if err := os.Remove(sysoFilePath); err != nil {
if !os.IsNotExist(err) {
log.Printf("Warning: Couldn't remove generated %s: %v. Please remove it manually.", sysoFilePath, err)
}
}
}

// Strip a version suffix off the arch if present
func stripVersion(goarch string) string {
i := strings.Index(goarch, "-")
if i < 0 {
return goarch
}
return goarch[:i]
}

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s", goos, goarch)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
sysoPath := buildWindowsResourceSyso(goarch, version)
if sysoPath == "" {
log.Printf("Warning: Windows binaries will not have file information embedded")
}
defer cleanupResourceSyso(sysoPath)
}
err := os.MkdirAll(dir, 0777)
if err != nil {
@@ -190,7 +305,7 @@ func compileArch(version, goos, goarch, dir string) bool {
}
env := []string{
"GOOS=" + goos,
"GOARCH=" + goarch,
"GOARCH=" + stripVersion(goarch),
}
if !*cgo {
env = append(env, "CGO_ENABLED=0")

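The synthetic arch entries above (linux/arm-v7, freebsd/arm-v7 and so on) carry extra build parameters in their names; stripVersion recovers the real GOARCH while the full synthetic name keys the extra env vars in archFlags. A small sketch of how the two combine, reusing stripVersion and archFlags exactly as defined in the hunks above:

	// buildEnv assembles the cross-compile environment for one osarch pair.
	func buildEnv(goos, goarch string) []string {
		env := []string{
			"GOOS=" + goos,
			"GOARCH=" + stripVersion(goarch), // "arm-v7" -> "arm"
		}
		// The synthetic name selects the extra flags, e.g. "arm-v7" -> GOARM=7.
		return append(env, archFlags[goarch]...)
	}
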
@@ -42,10 +42,10 @@ var (
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
"darwin": []string{"macos", "osx"},
"darwin": {"macos", "osx"},
}
archAliases = map[string][]string{
"amd64": []string{"x86_64"},
"amd64": {"x86_64"},
}
)

@@ -374,16 +374,13 @@ func untar(srcFile, fileName, extractDir string) {
if err != nil {
log.Fatalf("Couldn't open output file: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Couldn't close output: %v", err)
}
}()
n, err := io.Copy(out, tarReader)
if err != nil {
log.Fatalf("Couldn't write output file: %v", err)
}
if err = out.Close(); err != nil {
log.Fatalf("Couldn't close output: %v", err)
}
log.Printf("Wrote %s (%d bytes) as %q", fileName, n, outPath)
}
}

@@ -6,6 +6,7 @@ conversion into man pages etc.

import os
import re
import time
from datetime import datetime

docpath = "docs/content"
@@ -156,17 +157,19 @@ def read_commands(docpath):
if command != "rclone.md":
docs.append(read_command(command))
return "\n".join(docs)


def main():
check_docs(docpath)
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
""" % build_date.strftime("%b %d, %Y"))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md

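The docs.py change above makes the manual's build date reproducible: when SOURCE_DATE_EPOCH is set, as reproducible-build environments do, it overrides the wall clock. The same pattern translated to Go, as a self-contained sketch:

	package main

	import (
		"fmt"
		"os"
		"strconv"
		"time"
	)

	// buildDate honours SOURCE_DATE_EPOCH for reproducible builds and
	// falls back to the current time otherwise.
	func buildDate() time.Time {
		if s := os.Getenv("SOURCE_DATE_EPOCH"); s != "" {
			if epoch, err := strconv.ParseInt(s, 10, 64); err == nil {
				return time.Unix(epoch, 0).UTC()
			}
		}
		return time.Now().UTC()
	}

	func main() {
		fmt.Println(buildDate().Format("Jan 02, 2006"))
	}
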
bin/not-in-stable.go (Executable file, 73 lines)
@@ -0,0 +1,73 @@
// This shows the commits not yet in the stable branch
package main

import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"regexp"
)

// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')
// echo "Checking version ${version}"
// echo
//
// git log --oneline ${version}.0..${version}-stable | cut -c11- | sort > /tmp/in-stable
// git log --oneline ${version}.0..master | cut -c11- | sort > /tmp/in-master
//
// comm -23 /tmp/in-master /tmp/in-stable

var logRe = regexp.MustCompile(`^([0-9a-f]{4,}) (.*)$`)

// readCommits returns the log messages and a map of message to hash for the commits between from and to
func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log: %v", err)
}
logMap = map[string]string{}
logs = []string{}
for _, line := range bytes.Split(out, []byte{'\n'}) {
if len(line) == 0 {
continue
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line)
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
logs = append(logs, logMessage)
}
return logMap, logs
}

func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0])
}
versionBytes, err := ioutil.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}
i := bytes.LastIndexByte(versionBytes, '.')
version := string(versionBytes[:i])
log.Printf("Finding commits not in stable %s", version)
masterMap, masterLogs := readCommits(version+".0", "master")
stableMap, _ := readCommits(version+".0", version+"-stable")
for _, logMessage := range masterLogs {
// Commit found in stable already
if _, found := stableMap[logMessage]; found {
continue
}
hash := masterMap[logMessage]
fmt.Printf("%s %s\n", hash, logMessage)
}
}
bin/test-repeat.sh (Executable file, 97 lines)
@@ -0,0 +1,97 @@
#!/bin/bash

# defaults
buildflags=""
binary="test.binary"
flags=""
iterations="100"
logprefix="test.out"

help="
This runs go tests repeatedly logging all the failures to separate
files. It is very useful for debugging with printf for tests which
don't fail very often.

Syntax: $0 [flags]

Note that flags for 'go test' need to be expanded, eg '-test.v' instead
of just '-v'. '-race' does not need to be expanded.

Flags this script understands

-h, --help
show this help
-i=N, --iterations=N
do N iterations (default ${iterations})
-b=NAME, --binary=NAME
call the output binary NAME (default ${binary})
-l=NAME, --log-prefix=NAME
the log files generated will start with NAME (default ${logprefix})
-race
build the binary with race testing enabled
-tags=TAGS
build the binary with the tags supplied

Any other flags will be passed to go test.

Example

$0 flags -race -test.run 'TestRWFileHandleOpenTests'

"

if [[ "$@" == "" ]]; then
echo "${help}"
exit 1
fi

for i in "$@"
do
case $i in
-h|--help)
echo "${help}"
exit 1
;;
-b=*|--binary=*)
binary="${i#*=}"
shift # past argument=value
;;
-l=*|--log-prefix=*)
logprefix="${i#*=}"
shift # past argument=value
;;
-i=*|--iterations=*)
iterations="${i#*=}"
shift # past argument=value
;;
-race|--race|-tags=*|--tags=*)
buildflags="${buildflags} $i"
shift # past argument with no value
;;
*)
# unknown option
flags="${flags} ${i#*=}"
shift
;;
esac
done

echo -n "Compiling ${buildflags} ${binary} ... "
go test ${buildflags} -c -o "${binary}" || {
echo "build failed"
exit 1
}
echo "OK"

for i in $(seq -w ${iterations}); do
echo -n "Test ${buildflags} ${flags} ${i} "
log="${logprefix}${i}.log"
./${binary} ${flags} > ${log} 2>&1
ok=$?
if [[ ${ok} == 0 ]]; then
echo "OK"
rm ${log}
else
echo "FAIL - log in ${log}"
fi
done

@@ -14,4 +14,7 @@ else
echo "Use '$0 $version delete' to actually delete files"
fi

rclone ${dry_run} -P --fast-list --checkers 16 --transfers 16 delete --include "**${version}**" memstore:beta-rclone-org
rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
--include "/${version}**" \
--include "/branch/${version}**" \
memstore:beta-rclone-org

@@ -2,5 +2,5 @@
echo Setting environment variables for mingw+WinFsp compile
set GOPATH=Z:\go
rem set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
set PATH=C:\Program Files\mingw-w64\x86_64-8.1.0-win32-seh-rt_v6-rev0\mingw64\bin;%PATH%
set PATH=C:\Program Files\mingw-w64\x86_64-8.1.0-win32-seh-rt_v6-rev0\mingw64\bin;%GOPATH%/bin;%PATH%
set CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse

@@ -45,7 +45,7 @@ destination that are not in the source will not trigger an error.
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, false, command, func() error {
cmd.Run(false, true, command, func() error {
if download {
return operations.CheckDownload(context.Background(), fdst, fsrc, oneway)
}

@@ -486,6 +486,9 @@ func AddBackendFlags() {
help = help[:nl]
}
help = strings.TrimSpace(help)
if opt.IsPassword {
help += " (obscured)"
}
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"

@@ -71,7 +71,9 @@ recently very efficiently like this:

rclone copy --max-age 24h --no-traverse /path/to/src remote:

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.

**Note**: Use the ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag to test without copying anything.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -34,8 +34,10 @@ merged.

The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

Here is an example run.

|
||||
|
||||
That reads "delete everything with a minimum size of 100 MB", hence
|
||||
delete all files bigger than 100MBytes.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
|
||||
@@ -33,7 +33,6 @@ type frontmatter struct {
|
||||
}
|
||||
|
||||
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
|
||||
date: {{ .Date }}
|
||||
title: "{{ .Title }}"
|
||||
description: "{{ .Description }}"
|
||||
slug: {{ .Slug }}
|
||||
@@ -70,8 +69,7 @@ rclone.org website.`,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
flagsHelp := strings.Replace(buf.String(), "YYYY-MM-DD", now, -1)
|
||||
err = ioutil.WriteFile(filepath.Join(root, "flags.md"), []byte(flagsHelp), 0777)
|
||||
err = ioutil.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -245,7 +245,6 @@ Use "rclone help backends" for a list of supported services.
|
||||
var docFlagsTemplate = `---
|
||||
title: "Global Flags"
|
||||
description: "Rclone Global Flags"
|
||||
date: "YYYY-MM-DD"
|
||||
---
|
||||
|
||||
# Global Flags
|
||||
@@ -325,6 +324,9 @@ func showBackend(name string) {
|
||||
}
|
||||
fmt.Printf("#### --%s%s\n\n", opt.FlagName(backend.Prefix), shortOpt)
|
||||
fmt.Printf("%s\n\n", opt.Help)
|
||||
if opt.IsPassword {
|
||||
fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
|
||||
}
|
||||
fmt.Printf("- Config: %s\n", opt.Name)
|
||||
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
|
||||
fmt.Printf("- Type: %s\n", opt.Type())
|
||||
|
||||
@@ -88,9 +88,9 @@ func main() {
|
||||
}
|
||||
|
||||
records := [][]string{
|
||||
[]string{"", ""},
|
||||
[]string{"", ""},
|
||||
[]string{"Bytes", "Char"},
|
||||
{"", ""},
|
||||
{"", ""},
|
||||
{"Bytes", "Char"},
|
||||
}
|
||||
for _, r := range remoteNames {
|
||||
records[0] = append(records[0], hRemoteMap[r]...)
|
||||
|
||||
@@ -3,35 +3,57 @@ package link
import (
"context"
"fmt"
"time"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)

var (
expire = fs.Duration(time.Hour * 24 * 365 * 100)
unlink = false
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.FVarP(cmdFlags, &expire, "expire", "", "The amount of time that the link will be valid")
flags.BoolVarP(cmdFlags, &unlink, "unlink", "", unlink, "Remove existing public link to file/folder")
}

var commandDefinition = &cobra.Command{
Use: "link remote:path",
Short: `Generate public link to file/folder.`,
Long: `
rclone link will create or retrieve a public link to the given file or folder.
Long: `rclone link will create, retrieve or remove a public link to the given
file or folder.

rclone link remote:path/to/file
rclone link remote:path/to/folder/
rclone link --unlink remote:path/to/folder/
rclone link --expire 1d remote:path/to/file

If successful, the last line of the output will contain the link. Exact
capabilities depend on the remote, but the link will always be created with
the least constraints – e.g. no expiry, no password protection, accessible
without account.
If you supply the --expire flag, it will set the expiration time
otherwise it will use the default (100 years). **Note** not all
backends support the --expire flag - if the backend doesn't support it
then the link returned won't expire.

Use the --unlink flag to remove existing public links to the file or
folder. **Note** not all backends support "--unlink" flag - those that
don't will just ignore it.

If successful, the last line of the output will contain the
link. Exact capabilities depend on the remote, but the link will
always by default be created with the least constraints – e.g. no
expiry, no password protection, accessible without account.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc, remote := cmd.NewFsFile(args[0])
cmd.Run(false, false, command, func() error {
link, err := operations.PublicLink(context.Background(), fsrc, remote)
link, err := operations.PublicLink(context.Background(), fsrc, remote, expire, unlink)
if err != nil {
return err
}

@@ -2,8 +2,10 @@ package mkdir

import (
"context"
"strings"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -18,6 +20,9 @@ var commandDefinition = &cobra.Command{
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
if !fdst.Features().CanHaveEmptyDirectories && strings.Contains(fdst.Root(), "/") {
fs.Logf(fdst, "Warning: running mkdir on a remote which can't have empty directories does nothing")
}
cmd.Run(true, false, command, func() error {
return operations.Mkdir(context.Background(), fdst, "")
})

@@ -4,27 +4,35 @@ import (
"context"
"log"
"sort"
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)

// MountInfo defines the configuration for a mount
type MountInfo struct {
unmountFn UnmountFn
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
Fs string `json:"Fs"`
}

var (
// mutex to protect all the variables in this block
mountMu sync.Mutex
// Mount functions available
mountFns map[string]MountFn
// Map of mounted path => unmount function
unmountFns map[string]UnmountFn
mountFns = map[string]MountFn{}
// Map of mounted path => MountInfo
liveMounts = map[string]MountInfo{}
)

// AddRc adds mount and unmount functionality to rc
func AddRc(mountUtilName string, mountFunction MountFn) {
if mountFns == nil {
mountFns = make(map[string]MountFn)
}
if unmountFns == nil {
unmountFns = make(map[string]UnmountFn)
}
mountMu.Lock()
defer mountMu.Unlock()
// rcMount allows the mount command to be run from rc
mountFns[mountUtilName] = mountFunction
}
@@ -54,7 +62,7 @@ Eg
})
}

// rcMount allows the mount command to be run from rc
// mountRc allows the mount command to be run from rc
func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountPoint, err := in.GetString("mountPoint")
if err != nil {
@@ -63,6 +71,9 @@ func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {

mountType, err := in.GetString("mountType")

mountMu.Lock()
defer mountMu.Unlock()

if err != nil || mountType == "" {
if mountFns["mount"] != nil {
mountType = "mount"
@@ -80,11 +91,20 @@ func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
}

if mountFns[mountType] != nil {
_, _, unmountFns[mountPoint], err = mountFns[mountType](fdst, mountPoint)
_, _, unmountFn, err := mountFns[mountType](fdst, mountPoint)

if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = MountInfo{
unmountFn: unmountFn,
MountedOn: time.Now(),
Fs: fdst.Name(),
MountPoint: mountPoint,
}

fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
}
@@ -96,7 +116,7 @@ func init() {
Path: "mount/unmount",
AuthRequired: true,
Fn: unMountRc,
Title: "Unmount all active mounts",
Title: "Unmount selected active mount",
Help: `
rclone allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
@@ -113,23 +133,18 @@ Eg
})
}

// rcMount allows the umount command to be run from rc
// unMountRc allows the umount command to be run from rc
func unMountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountPoint, err := in.GetString("mountPoint")
if err != nil {
return nil, err
}

if unmountFns != nil && unmountFns[mountPoint] != nil {
err := unmountFns[mountPoint]()
if err != nil {
return nil, err
}
unmountFns[mountPoint] = nil
} else {
return nil, errors.New("mount not found")
mountMu.Lock()
defer mountMu.Unlock()
err = performUnMount(mountPoint)
if err != nil {
return nil, err
}

return nil, nil
}

@@ -158,6 +173,8 @@ Eg
// mountTypesRc returns a list of available mount types.
func mountTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var mountTypes = []string{}
mountMu.Lock()
defer mountMu.Unlock()
for mountType := range mountFns {
mountTypes = append(mountTypes, mountType)
}
@@ -166,3 +183,81 @@ func mountTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
"mountTypes": mountTypes,
}, nil
}

func init() {
rc.Add(rc.Call{
Path: "mount/listmounts",
AuthRequired: true,
Fn: listMountsRc,
Title: "Show current mount points",
Help: `This shows currently mounted points, which can be used for performing an unmount

This takes no parameters and returns

- mountPoints: list of current mount points

Eg

rclone rc mount/listmounts
`,
})
}

// listMountsRc returns a list of current mounts
func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var mountTypes = []MountInfo{}
mountMu.Lock()
defer mountMu.Unlock()
for _, a := range liveMounts {
mountTypes = append(mountTypes, a)
}
return rc.Params{
"mountPoints": mountTypes,
}, nil
}

func init() {
rc.Add(rc.Call{
Path: "mount/unmountall",
AuthRequired: true,
Fn: unmountAll,
Title: "Show current mount points",
|
||||
Help: `This shows currently mounted points, which can be used for performing an unmount
|
||||

This takes no parameters and returns error if unmount does not succeed.

Eg

rclone rc mount/unmountall
`,
})
}

// unmountAll unmounts all the created mounts
func unmountAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
for key, mountInfo := range liveMounts {
err = performUnMount(mountInfo.MountPoint)
if err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", key)
return nil, err
}
}
return nil, nil
}

// performUnMount unmounts the specified mountPoint
func performUnMount(mountPoint string) (err error) {
mountInfo, ok := liveMounts[mountPoint]
if ok {
err := mountInfo.unmountFn()
if err != nil {
return err
}
delete(liveMounts, mountPoint)
} else {
return errors.New("mount not found")
}
return nil
}

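The mount rc changes above replace the bare unmountFns map with a mutex-guarded liveMounts registry so concurrent rc calls cannot race on mount state. A reduced, runnable sketch of the pattern; note that in the diff the rc handlers take mountMu before calling performUnMount, whereas this standalone variant folds the locking into the helper itself:

	package main

	import (
		"errors"
		"fmt"
		"sync"
		"time"
	)

	// mountInfo is a reduced stand-in for the MountInfo of the diff.
	type mountInfo struct {
		unmountFn func() error
		MountedOn time.Time
	}

	var (
		mountMu    sync.Mutex // guards liveMounts
		liveMounts = map[string]mountInfo{}
	)

	// performUnMount removes one mount; every read or write of liveMounts
	// happens with mountMu held.
	func performUnMount(mountPoint string) error {
		mountMu.Lock()
		defer mountMu.Unlock()
		m, ok := liveMounts[mountPoint]
		if !ok {
			return errors.New("mount not found")
		}
		if err := m.unmountFn(); err != nil {
			return err
		}
		delete(liveMounts, mountPoint)
		return nil
	}

	func main() {
		liveMounts["/mnt/data"] = mountInfo{
			unmountFn: func() error { return nil },
			MountedOn: time.Now(),
		}
		fmt.Println(performUnMount("/mnt/data")) // <nil>
		fmt.Println(performUnMount("/mnt/data")) // mount not found
	}
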
@@ -78,7 +78,9 @@ func TestRc(t *testing.T) {

// mount
_, err = mount.Fn(ctx, in)
require.NoError(t, err)
if err != nil {
t.Skipf("Mount failed - skipping test: %v", err)
}

// check file.txt is there now
fi, err := os.Stat(filePath)

@@ -49,7 +49,7 @@ option when moving a small number of files into a large destination
can speed transfers up greatly.

**Important**: Since this can cause data loss, test first with the
--dry-run flag.
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
`,

@@ -44,7 +44,7 @@ modification time or MD5SUM. src will be deleted on successful
transfer.

**Important**: Since this can cause data loss, test first with the
--dry-run flag.
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
`,

@@ -14,7 +14,21 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone.conf`,
Short: `Obscure password for use in the rclone config file`,
Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these
passwords as rclone can decrypt them - it is to prevent "eyedropping"
- namely someone seeing a password in the rclone config file by
accident.

Many equally important things (like access tokens) are not obscured in
the config file. However it is very hard to shoulder surf a 64
character hex token.

If you want to encrypt the config file then please use config file
encryption - see [rclone config](/commands/rclone_config/) for more
info.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
cmd.Run(false, false, command, func() error {

@@ -19,6 +19,9 @@ var commandDefinition = &cobra.Command{
Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use ` + "`" + `delete` + "`" + ` if
you want to selectively delete files.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -77,10 +77,12 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
}

if fileInfo.IsDir() {
defaultChildCount := 1
obj.Class = "object.container.storageFolder"
obj.Title = fileInfo.Name()
return upnpav.Container{
Object: obj,
Object: obj,
ChildCount: &defaultChildCount,
}, nil
}


@@ -10,14 +10,14 @@ import (
var Help = `
### Server options

Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, eg ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.

Use --name to choose the friendly server name, which is by
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".

Use --log-trace in conjunction with -vv to enable additional debug
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
`


@@ -1,17 +1,15 @@
// Package ftp implements an FTP server for rclone

//+build !plan9
//+build !plan9,go1.13

package ftp

import (
"bytes"
"fmt"
"io"
"net"
"os"
"os/user"
"runtime"
"strconv"
"sync"

@@ -114,13 +112,11 @@ You can set a single username and password with the --user and --pass flags.

// server contains everything to run the server
type server struct {
f fs.Fs
srv *ftp.Server
opt Options
vfs *vfs.VFS
proxy *proxy.Proxy
pendingMu sync.Mutex
pending map[string]*Driver // pending Driver~s that haven't got their VFS
f fs.Fs
srv *ftp.Server
opt Options
vfs *vfs.VFS
proxy *proxy.Proxy
}

// Make a new FTP to serve the remote
@@ -135,9 +131,8 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
}

s := &server{
f: f,
opt: *opt,
pending: make(map[string]*Driver),
f: f,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(&proxyflags.Opt)
@@ -151,7 +146,7 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
Factory: s, // implemented by NewDriver method
Hostname: host,
Port: portNum,
PublicIp: opt.PublicIP,
PublicIP: opt.PublicIP,
PassivePorts: opt.PassivePorts,
Auth: s, // implemented by CheckPasswd method
Logger: &Logger{},
@@ -200,78 +195,13 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
fs.Infof(sessionID, "< %d %s", code, message)
}

// findID finds the connection ID of the calling program. It does
// this in an incredibly hacky way by looking in the stack trace.
//
// callerName should be the name of the function that we are looking
// for with a trailing '('
//
// What is really needed is a change of calling protocol so
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
// github.com/rclone/rclone/vendor/goftp.io/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/goftp.io/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]

// look for callerName first
i := bytes.Index(buf, callerName)
if i < 0 {
return "", errors.Errorf("findID: caller name not found in:\n%s", buf)
}
buf = buf[i+len(callerName):]

// find next ')'
i = bytes.IndexByte(buf, ')')
if i < 0 {
return "", errors.Errorf("findID: end of args not found in:\n%s", buf)
}
buf = buf[:i]

// trim off first argument
// find next ','
i = bytes.IndexByte(buf, ',')
if i >= 0 {
buf = buf[:i]
}

return string(buf), nil
}

var connServeFunction = []byte("(*Conn).Serve(")

// CheckPasswd handle auth based on configuration
//
// This is not used - the one in Driver should be called instead
func (s *server) CheckPasswd(user, pass string) (ok bool, err error) {
var VFS *vfs.VFS
if s.proxy != nil {
VFS, _, err = s.proxy.Call(user, pass, false)
if err != nil {
fs.Infof(nil, "proxy login failed: %v", err)
return false, nil
}
id, err := findID(connServeFunction)
if err != nil {
fs.Infof(nil, "proxy login failed: failed to read ID from stack: %v", err)
return false, nil
}
s.pendingMu.Lock()
d := s.pending[id]
delete(s.pending, id)
s.pendingMu.Unlock()
if d == nil {
return false, errors.Errorf("proxy login failed: failed to find pending Driver under ID %q", id)
}
d.vfs = VFS
} else {
ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil
}
}
return true, nil
err = errors.New("internal error: server.CheckPasswd should never be called")
fs.Errorf(nil, "Error: %v", err)
return false, err
}

// NewDriver starts a new session for each client connection
@@ -291,15 +221,25 @@ type Driver struct {
lock sync.Mutex
}

//Init a connection
func (d *Driver) Init(c *ftp.Conn) {
defer log.Trace("", "Init session")("")
if d.s.proxy != nil {
id := fmt.Sprintf("%p", c)
d.s.pendingMu.Lock()
d.s.pending[id] = d
d.s.pendingMu.Unlock()
// CheckPasswd handle auth based on configuration
func (d *Driver) CheckPasswd(user, pass string) (ok bool, err error) {
s := d.s
if s.proxy != nil {
var VFS *vfs.VFS
VFS, _, err = s.proxy.Call(user, pass, false)
if err != nil {
fs.Infof(nil, "proxy login failed: %v", err)
return false, nil
}
d.vfs = VFS
} else {
ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil
}
}
return true, nil
}

//Stat get information on file or folder

@@ -3,12 +3,11 @@
//
// We skip tests on platforms with troublesome character mappings

//+build !windows,!darwin,!plan9
//+build !windows,!darwin,!plan9,go1.13

package ftp

import (
"fmt"
"testing"

_ "github.com/rclone/rclone/backend/local"
@@ -17,7 +16,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ftp "goftp.io/server"
)

@@ -70,10 +68,3 @@ func TestFTP(t *testing.T) {

servetest.Run(t, "ftp", start)
}

func TestFindID(t *testing.T) {
id, err := findID([]byte("TestFindID("))
require.NoError(t, err)
// id should be the argument to this function
assert.Equal(t, fmt.Sprintf("%p", t), id)
}

@@ -1,7 +1,7 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 !go1.13

package ftp


@@ -17,6 +17,7 @@ import (
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -33,9 +34,10 @@ var (

func init() {
httpflags.AddFlags(Command.Flags())
Command.Flags().BoolVar(&stdio, "stdio", false, "run an HTTP2 server on stdin/stdout")
Command.Flags().BoolVar(&appendOnly, "append-only", false, "disallow deletion of repository data")
Command.Flags().BoolVar(&privateRepos, "private-repos", false, "users can only access their private repo")
flagSet := Command.Flags()
flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "run an HTTP2 server on stdin/stdout")
flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "disallow deletion of repository data")
flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "users can only access their private repo")
}

// Command definition for cobra

@@ -29,8 +29,6 @@ type StartFn func(f fs.Fs) (configmap.Simple, func())
// run runs the server then runs the unit tests for the remote against
// it.
func run(t *testing.T, name string, start StartFn, useProxy bool) {
fstest.Initialise()

fremote, _, clean, err := fstest.RandomRemote()
assert.NoError(t, err)
defer clean()
@@ -98,6 +96,7 @@ func run(t *testing.T, name string, start StartFn, useProxy bool) {
// Run runs the server then runs the unit tests for the remote against
// it.
func Run(t *testing.T, name string, start StartFn) {
fstest.Initialise()
t.Run("Normal", func(t *testing.T) {
run(t, name, start, false)
})

@@ -15,6 +15,7 @@ import (
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/vfs"
@@ -30,11 +31,12 @@ var (
)

func init() {
httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off")
Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory")
flagSet := Command.Flags()
httpflags.AddFlags(flagSet)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
flags.StringVarP(flagSet, &hashName, "etag-hash", "", "", "Which hash to use for the ETag, or auto or blank for off")
flags.BoolVarP(flagSet, &disableGETDir, "disable-dir-list", "", false, "Disable HTML directory list on GET request for a directory")
}

// Command definition for cobra

@@ -30,7 +30,9 @@ modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary.

**Important**: Since this can cause data loss, test first with the
` + "`" + `--dry-run` + "`" + ` flag to see exactly what would be copied and deleted.
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

rclone sync -i SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point.

@@ -19,8 +19,11 @@ var (
localTime bool
)

const defaultLayout string = "060102"
const layoutDateWithTime = "2006-01-02T15:04:05"
const (
defaultLayout string = "060102"
layoutDateWithTime = "2006-01-02T15:04:05"
layoutDateWithTimeNano = "2006-01-02T15:04:05.999999999"
)

func init() {
cmd.Root.AddCommand(commandDefinition)
@@ -45,6 +48,7 @@ time instead of the current time. Times may be specified as one of:

- 'YYMMDD' - eg. 17.10.30
- 'YYYY-MM-DDTHH:MM:SS' - eg. 2006-01-02T15:04:05
- 'YYYY-MM-DDTHH:MM:SS.SSS' - eg. 2006-01-02T15:04:05.123456789

Note that --timestamp is in UTC if you want local time then add the
--localtime flag.
@@ -65,6 +69,8 @@ func Touch(ctx context.Context, fsrc fs.Fs, srcFileName string) (err error) {
layout := defaultLayout
if len(timeAsArgument) == len(layoutDateWithTime) {
layout = layoutDateWithTime
} else if len(timeAsArgument) > len(layoutDateWithTime) {
layout = layoutDateWithTimeNano
}
var timeAtrFromFlags time.Time
if localTime {

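The touch hunk above picks a parse layout by comparing the argument's length against the known layouts, so timestamps with fractional seconds route to the nanosecond layout. A runnable sketch of that dispatch using the constants from the diff:

	package main

	import (
		"fmt"
		"time"
	)

	const (
		defaultLayout          = "060102"
		layoutDateWithTime     = "2006-01-02T15:04:05"
		layoutDateWithTimeNano = "2006-01-02T15:04:05.999999999"
	)

	// pickLayout mirrors the length-based dispatch in the diff above.
	func pickLayout(arg string) string {
		switch {
		case len(arg) == len(layoutDateWithTime):
			return layoutDateWithTime
		case len(arg) > len(layoutDateWithTime):
			return layoutDateWithTimeNano
		default:
			return defaultLayout
		}
	}

	func main() {
		arg := "2006-01-02T15:04:05.123456789"
		t, err := time.ParseInLocation(pickLayout(arg), arg, time.UTC)
		fmt.Println(t, err)
	}
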
@@ -61,7 +61,7 @@ func init() {
flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
// Graphics
flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "i", false, "Don't print indentation lines.")
flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines.")
flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always.")
}


@@ -16,6 +16,7 @@
"~$",
"^\\."
],
"enableGitInfo": true,
"markup": {
"goldmark": {
"extensions": {

@@ -2,7 +2,6 @@
|
||||
title: "Rclone"
|
||||
description: "Rclone syncs your files to cloud storage: Google Drive, S3, Swift, Dropbox, Google Cloud Storage, Azure, Box and many more."
|
||||
type: page
|
||||
date: "2020-05-16"
|
||||
---
|
||||
|
||||
# Rclone syncs your files to cloud storage
|
||||
@@ -10,9 +9,9 @@ date: "2020-05-16"
|
||||
{{< img width="50%" src="/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" >}}
|
||||
|
||||
- [About rclone](#about)
|
||||
- [What can rclone do for you](#what)
|
||||
- [What features does rclone have](#features)
|
||||
- [What providers does rclone support](#providers)
|
||||
- [What can rclone do for you?](#what)
|
||||
- [What features does rclone have?](#features)
|
||||
- [What providers does rclone support?](#providers)
|
||||
- [Download](/downloads/)
|
||||
- [Install](/install/)
|
||||
{{< rem MAINPAGELINK >}}
|
||||
@@ -27,20 +26,20 @@ services, as well as standard transfer protocols.
|
||||
|
||||
Rclone has powerful cloud equivalents to the unix commands rsync, cp,
|
||||
mv, mount, ls, ncdu, tree, rm, and cat. Rclone's familiar syntax
|
||||
includes shell pipeline support, and `--dry-run` protection. It can be
|
||||
includes shell pipeline support, and `--dry-run` protection. It is
|
||||
used at the command line, in scripts or via its [API](/rc).
|
||||
|
||||
Users have called rclone *"The Swiss army knife of cloud storage"* and
|
||||
Users call rclone *"The Swiss army knife of cloud storage"*, and
|
||||
*"Technology indistinguishable from magic"*.
|
||||
|
||||
Rclone really looks after your data. It preserves timestamps and
|
||||
verifies your data at all times. Transfers over limited bandwidth;
|
||||
verifies checksums at all times. Transfers over limited bandwidth;
|
||||
intermittent connections, or subject to quota can be restarted, from
|
||||
the last good file transferred. You can
|
||||
[check](/commands/rclone_check/) the integrity of your files. Where
|
||||
possible, rclone employs server side transfers to minimise local
|
||||
bandwidth use and transfers from one provider to another without
|
||||
using your local disk.
|
||||
using local disk.
|
||||
|
||||
Virtual backends wrap local and cloud file systems to apply
|
||||
[encryption](/crypt/),
|
||||
@@ -48,9 +47,9 @@ Virtual backends wrap local and cloud file systems to apply
 [chunking](/chunker/) and
 [joining](/union/).

-Rclone can [mount](/commands/rclone_mount/) any local, cloud or
+Rclone [mounts](/commands/rclone_mount/) any local, cloud or
 virtual filesystem as a disk on Windows,
-macOS, linux and FreeBSD, and also serve these over
+macOS, linux and FreeBSD, and also serves these over
 [SFTP](/commands/rclone_serve_sftp/),
 [HTTP](/commands/rclone_serve_http/),
 [WebDAV](/commands/rclone_serve_webdav/),

@@ -64,14 +63,14 @@ Fedora, Brew and Chocolatey repos. include rclone. For the latest
 version [downloading from rclone.org](/downloads/) is recommended.

 Rclone is widely used on Linux, Windows and Mac. Third party
-developers have built innovative backup, restore, GUI and business
+developers create innovative backup, restore, GUI and business
 process solutions using the rclone command line or API.

-Let rclone do the heavy lifting of communicating with cloud storage.
+Rclone does the heavy lifting of communicating with cloud storage.

-## What can rclone do for you {#what}
+## What can rclone do for you? {#what}

-Rclone can help you:
+Rclone helps you:

 - Backup (and encrypt) files to cloud storage
 - Restore (and decrypt) files from cloud storage

@@ -99,7 +98,7 @@ Rclone can help you:

 ## Supported providers {#providers}

-(There are many other providers, built on standard protocols such as
+(There are many others, built on standard protocols such as
 WebDAV or S3, that work out of the box.)

 {{< provider_list >}}

@@ -1,7 +1,6 @@
 ---
 title: "Alias"
 description: "Remote Aliases"
-date: "2018-01-30"
 ---

 {{< icon "fa fa-link" >}} Alias

@@ -1,7 +1,6 @@
 ---
 title: "Amazon Drive"
 description: "Rclone docs for Amazon Drive"
-date: "2017-06-10"
 ---

 {{< icon "fab fa-amazon" >}} Amazon Drive

@@ -1,7 +1,6 @@
 ---
 title: "Authors"
 description: "Rclone Authors and Contributors"
-date: "2016-11-02"
 ---

 Authors

@@ -378,3 +377,18 @@ put them back in again.` >}}
 * Martin Michlmayr <tbm@cyrius.com>
 * Brandon McNama <bmcnama@pagerduty.com>
 * Daniel Slyman <github@skylayer.eu>
+* Alex Guerrero <guerrero@users.noreply.github.com>
+* Matteo Pietro Dazzi <matteopietro.dazzi@gft.com>
+* edwardxml <56691903+edwardxml@users.noreply.github.com>
+* Roman Kredentser <shareed2k@gmail.com>
+* Kamil Trzciński <ayufan@ayufan.eu>
+* Zac Rubin <z-0@users.noreply.github.com>
+* Vincent Feltz <psycho@feltzv.fr>
+* Heiko Bornholdt <bornholdt@informatik.uni-hamburg.de>
+* Matteo Pietro Dazzi <matteopietro.dazzi@gmail.com>
+* jtagcat <gitlab@c7.ee>
+* Petri Salminen <petri@salminen.dev>
+* Tim Burke <tim.burke@gmail.com>
+* Kai Lüke <kai@kinvolk.io>
+* Garrett Squire <github@garrettsquire.com>
+* Evan Harris <eharris@puremagic.com>

@@ -1,7 +1,6 @@
 ---
 title: "Microsoft Azure Blob Storage"
 description: "Rclone docs for Microsoft Azure Blob Storage"
-date: "2017-07-30"
 ---

 {{< icon "fab fa-windows" >}} Microsoft Azure Blob Storage

@@ -66,7 +65,7 @@ List the contents of a container
 Sync `/home/local/directory` to the remote container, deleting any excess
 files in the container.

-    rclone sync /home/local/directory remote:container
+    rclone sync -i /home/local/directory remote:container

 ### --fast-list ###

@@ -1,7 +1,6 @@
 ---
 title: "B2"
 description: "Backblaze B2"
-date: "2016-10-25"
 ---

 {{< icon "fa fa-fire" >}} Backblaze B2

@@ -71,7 +70,7 @@ List the contents of a bucket
 Sync `/home/local/directory` to the remote bucket, deleting any
 excess files in the bucket.

-    rclone sync /home/local/directory remote:bucket
+    rclone sync -i /home/local/directory remote:bucket

 ### Application Keys ###

@@ -118,6 +117,12 @@ the following characters are also replaced:
 Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
 as they can't be used in JSON strings.

+Note that in 2020-05 Backblaze started allowing \ characters in file
+names. Rclone hasn't changed its encoding as this could cause syncs to
+re-transfer files. If you want rclone not to replace \ then see the
+`--b2-encoding` flag below and remove the `BackSlash` from the
+string. This can be set in the config.
+
 ### SHA1 checksums ###

 The SHA1 checksums of the files are checked on upload and download and

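As a sketch of the encoding change described in the added note above: assuming the b2 backend's default encoding string is `Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot` (verify against the flag's help text for your version), keeping `\` unreplaced means dropping `BackSlash` from it:

    rclone copy --b2-encoding "Slash,Del,Ctl,InvalidUtf8,Dot" /path/to/files remote:bucket

or, equivalently, in the config file (remote name illustrative):

    [remote]
    type = b2
    encoding = Slash,Del,Ctl,InvalidUtf8,Dot
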
@@ -1,7 +1,6 @@
 ---
 title: "Box"
 description: "Rclone docs for Box"
-date: "2015-10-14"
 ---

 {{< icon "fa fa-archive" >}} Box

@@ -227,6 +226,12 @@ normally 8MB so increasing `--transfers` will increase memory use.
 Depending on the enterprise settings for your user, the item will
 either be actually deleted from Box or moved to the trash.

+Emptying the trash is supported via the rclone cleanup command,
+however this deletes every trashed file and folder individually so it
+may take a very long time.
+Emptying the trash via the WebUI does not have this limitation
+so it is advised to empty the trash via the WebUI.
+
 ### Root folder ID ###

 You can set the `root_folder_id` for rclone. This is the directory

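For reference, the cleanup command mentioned in the added paragraph is run against the remote as a whole, or a path below it:

    rclone cleanup remote:
    rclone cleanup remote:path
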
@@ -1,7 +1,6 @@
 ---
 title: "Bugs"
 description: "Rclone Bugs and Limitations"
-date: "2019-08-04"
 ---

 # Bugs and Limitations

@@ -1,7 +1,6 @@
 ---
 title: "Cache"
 description: "Rclone docs for cache remote"
-date: "2017-09-03"
 ---

 {{< icon "fa fa-archive" >}} Cache (BETA)

@@ -1,11 +1,70 @@
 ---
 title: "Documentation"
 description: "Rclone Changelog"
-date: "2020-05-27"
 ---

 # Changelog

+## v1.52.2 - 2020-06-24
+
+[See commits](https://github.com/rclone/rclone/compare/v1.52.1...v1.52.2)
+
+* Bug Fixes
+    * build
+        * Fix docker release build action (Nick Craig-Wood)
+        * Fix custom timezone in Docker image (NoLooseEnds)
+    * check: Fix misleading message which printed errors instead of differences (Nick Craig-Wood)
+    * errors: Add WSAECONNREFUSED and more to the list of retriable Windows errors (Nick Craig-Wood)
+    * rcd: Fix incorrect prometheus metrics (Gary Kim)
+    * serve restic: Fix flags so they use environment variables (Nick Craig-Wood)
+    * serve webdav: Fix flags so they use environment variables (Nick Craig-Wood)
+    * sync: Fix --track-renames-strategy modtime (Nick Craig-Wood)
+* Drive
+    * Fix not being able to delete a directory with a trashed shortcut (Nick Craig-Wood)
+    * Fix creating a directory inside a shortcut (Nick Craig-Wood)
+    * Fix --drive-impersonate with cached root_folder_id (Nick Craig-Wood)
+* SFTP
+    * Fix SSH key PEM loading (Zac Rubin)
+* Swift
+    * Speed up deletes by not retrying segment container deletes (Nick Craig-Wood)
+* Tardigrade
+    * Upgrade to uplink v1.1.1 (Caleb Case)
+* WebDAV
+    * Fix free/used display for rclone about/df for certain backends (Nick Craig-Wood)
+
+## v1.52.1 - 2020-06-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.52.1)
+
+* Bug Fixes
+    * lib/file: Fix SetSparse on Windows 7 which fixes downloads of files > 250MB (Nick Craig-Wood)
+    * build
+        * Update go.mod to go1.14 to enable -mod=vendor build (Nick Craig-Wood)
+        * Remove quicktest from Dockerfile (Nick Craig-Wood)
+        * Build Docker images with GitHub actions (Matteo Pietro Dazzi)
+        * Update Docker build workflows (Nick Craig-Wood)
+        * Set user_allow_other in /etc/fuse.conf in the Docker image (Nick Craig-Wood)
+        * Fix xgo build after go1.14 go.mod update (Nick Craig-Wood)
+    * docs
+        * Add link to source and modified time to footer of every page (Nick Craig-Wood)
+        * Remove manually set dates and use git dates instead (Nick Craig-Wood)
+        * Minor tense, punctuation, brevity and positivity changes for the home page (edwardxml)
+        * Remove leading slash in page reference in footer when present (Nick Craig-Wood)
+        * Note commands which need obscured input in the docs (Nick Craig-Wood)
+        * obscure: Write more help as we are referencing it elsewhere (Nick Craig-Wood)
+* VFS
+    * Fix OS vs Unix path confusion - fixes ChangeNotify on Windows (Nick Craig-Wood)
+* Drive
+    * Fix missing items when listing using --fast-list / ListR (Nick Craig-Wood)
+* Putio
+    * Fix panic on Object.Open (Cenk Alti)
+* S3
+    * Fix upload of single files into buckets without create permission (Nick Craig-Wood)
+    * Fix --header-upload (Nick Craig-Wood)
+* Tardigrade
+    * Fix listing bug by upgrading to v1.0.7
+    * Set UserAgent to rclone (Caleb Case)
+
 ## v1.52.0 - 2020-05-27

 Special thanks to Martin Michlmayr for proof reading and correcting

Some files were not shown because too many files have changed in this diff.