mirror of https://github.com/rclone/rclone.git synced 2025-12-24 12:13:19 +00:00

Compare commits

..

3 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
6a86ec70ea Added support for Max Pages
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-02-10 14:26:58 +05:30
Nick Craig-Wood
37c55972e3 vfs: add --vfs-read-wait and --vfs-write-wait flags
    --vfs-read-wait duration    Time to wait for in-sequence read before seeking. (default 5ms)
    --vfs-write-wait duration   Time to wait for in-sequence write before giving error. (default 1s)

See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-10 10:30:20 +05:30
Nick Craig-Wood
91aef66881 mount: add --async-read flag to disable asynchronous reads
See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-10 10:30:20 +05:30
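As a hedged illustration of the flags introduced in the two Nick Craig-Wood commits above (the remote name and mount point are hypothetical; the wait values shown are simply the documented defaults), a mount might combine them like this:

```sh
# Mount a hypothetical remote with asynchronous reads disabled and the
# default in-sequence read/write wait times spelled out explicitly.
rclone mount remote: /mnt/rclone \
    --async-read=false \
    --vfs-read-wait 5ms \
    --vfs-write-wait 1s
```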
2722 changed files with 765551 additions and 109866 deletions

.github/FUNDING.yml (vendored, 4 changed lines)

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -33,18 +33,18 @@ The Rclone Developers
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (e.g. Google Drive)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)


@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Rclone Forum Community Support
url: https://forum.rclone.org/
about: Please ask and answer questions here.


@@ -19,79 +19,82 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_amd64
- job_name: mac
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
go: '1.13.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.16.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.16.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
go: '1.13.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.15
- job_name: go1.10
os: ubuntu-latest
go: '1.15.x'
go: '1.10.x'
modules: 'off'
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
@@ -99,24 +102,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v1
with:
fetch-depth: 0
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v1
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
@@ -131,7 +136,7 @@ jobs:
shell: bash
run: |
brew update
brew install --cask macfuse
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
@@ -139,10 +144,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -162,22 +167,10 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
@@ -204,9 +197,8 @@ jobs:
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
@@ -229,19 +221,19 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
@@ -249,13 +241,13 @@ jobs:
.
- name: Build rclone
shell: bash
run: |
make
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: Upload artifacts
run: |
make ci_upload
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
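The workflow hunks above contrast the deprecated `::set-env`/`::add-path` workflow commands with writes to the `$GITHUB_ENV`/`$GITHUB_PATH` files. A minimal sketch of the two styles, using a made-up variable:

```sh
# Newer style: append to the files GitHub Actions reads back into the job.
echo "MY_VAR=hello" >> "$GITHUB_ENV"
echo "$HOME/bin" >> "$GITHUB_PATH"

# Older, deprecated style that appears on the other side of the diff.
echo "::set-env name=MY_VAR::hello"
echo "::add-path::$HOME/bin"
```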


@@ -1,25 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,33 +0,0 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
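As a worked example of the tag-parsing steps in the deleted release workflow above (the `GITHUB_REF` value is hypothetical):

```sh
GITHUB_REF=refs/tags/v1.51.0   # example ref for a release tag
echo "$GITHUB_REF" | cut -d / -f 3 | sed 's/v//g'                      # 1.51.0 (patch)
echo "$GITHUB_REF" | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2  # 1.51   (minor)
echo "$GITHUB_REF" | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1    # 1      (major)
```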

.gitignore (vendored, 1 changed line)

@@ -9,4 +9,3 @@ rclone.iml
*.test
*.log
*.iml
fuzz-build.zip


@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
@@ -48,7 +48,7 @@ When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
@@ -72,22 +72,15 @@ Make sure you
When you are done with that
git push -u origin my-new-feature
git push origin my-new-feature
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
## CI for your fork ##
@@ -99,7 +92,7 @@ rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -115,8 +108,8 @@ are skipped if `TestDrive:` isn't defined.
cd backend/drive
go test -v
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.
cd fs/sync
@@ -127,7 +120,7 @@ but they can be run against any of the remotes.
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the
all together with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
@@ -155,7 +148,6 @@ with modules beneath.
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -165,12 +157,12 @@ with modules beneath.
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* hash - defines rclones hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, e.g. Copy, Move
* operations - primitives for sync, eg Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
@@ -178,7 +170,7 @@ with modules beneath.
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website, etc.
* graphics - the images used in the website etc
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
@@ -186,6 +178,7 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
@@ -202,12 +195,12 @@ for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
files. The MANUAL.*, rclone.1, web site etc are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, e.g.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
@@ -265,27 +258,43 @@ rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone can be built with modules outside of the `GOPATH`.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
## Updating a dependency ##
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
Check in a single commit as above.
Check in in a single commit as above.
## Updating all the dependencies ##
@@ -333,8 +342,8 @@ Getting going
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests
@@ -364,7 +373,7 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
alphabetical order of full name of remote (eg `drive` is ordered as
`Google Drive`) but with the local file system last.
* `README.md` - main GitHub page
@@ -373,7 +382,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
@@ -400,7 +409,7 @@ Usage
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository


@@ -3,21 +3,19 @@ FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
ENTRYPOINT [ "rclone" ]
WORKDIR /data


@@ -2,20 +2,17 @@
Current active maintainers of rclone are:
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft**
@@ -33,11 +30,11 @@ Rclone uses the labels like this:
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `FUSE` - do do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
@@ -45,7 +42,7 @@ Rclone uses the labels like this:
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
The milestones have these meanings:

MANUAL.html (generated, 12926 changed lines; file diff suppressed because it is too large)

MANUAL.md (generated, 13049 changed lines; file diff suppressed because it is too large)

MANUAL.txt (generated, 15992 changed lines; file diff suppressed because it is too large)

Makefile (136 changed lines)

@@ -1,35 +1,33 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
endif
TAG_BRANCH := .$(BRANCH)
BRANCH_PATH := branch/$(BRANCH)/
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
# TAG is current version + commit number + commit + branch
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
@@ -46,26 +44,23 @@ endif
.PHONY: rclone test_all vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
version:
@echo '$(TAG)'
@@ -76,10 +71,10 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
# Do source code quality checks
check: rclone
@@ -91,42 +86,35 @@ check: rclone
build_dep:
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
cd /tmp && go get github.com/goreleaser/nfpm/v2/...
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
# Get the release dependencies
release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
@@ -148,27 +136,14 @@ clean:
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
website:
rm -rf docs/public
cd docs && hugo
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
validate_website: website
find docs/public -type f -name "*.html" | xargs tidy --mute-id yes -errors --gnu-emacs yes --drop-empty-elements no --warn-proprietary-attributes no --mute MISMATCHED_ATTRIBUTE_WARN
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -187,10 +162,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -198,61 +173,64 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
ci_upload:
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from GitHub actions
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w --disableFastRender
cd docs && hugo server -v -w
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(VERSION)\" -a -v
@echo "And finally run make retag before make cross, etc."
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
@echo "Version is $(VERSION)"
git tag -f -s -m "Version $(VERSION)" $(VERSION)
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
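As a sketch of how the newer side of this Makefile assembles the beta tag from `TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)` (all values below are made up for illustration):

```sh
VERSION=v1.55.0                      # contents of the VERSION file
VERSION_SUFFIX=-beta.5123.abcdef12   # -beta.<commit count>.<short commit hash>
TAG_BRANCH=.fix-vfs                  # .$(BRANCH); empty when building HEAD or master
echo "${VERSION}${VERSION_SUFFIX}${TAG_BRANCH}"
# prints: v1.55.0-beta.5123.abcdef12.fix-vfs
```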


@@ -30,13 +30,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -61,16 +59,12 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -85,7 +79,6 @@ Please see [the full list of all storage providers and their features](https://r
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))


@@ -4,12 +4,12 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release
* [gh the github cli](https://github.com/cli/cli) for uploading packages
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
## Making a release
* git checkout master # see below for stable branch
* git checkout master
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
@@ -21,20 +21,19 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --follow-tags origin
* git push --tags origin master
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
* make startdev
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the dependencies
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
@@ -42,45 +41,71 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug:
Set vars
* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-stable
* git co ${BASE_TAG}-fixes
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```


@@ -1 +1 @@
v1.55.0
v1.51.0


@@ -1,12 +1,10 @@
package alias
import (
"context"
"errors"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -35,7 +33,7 @@ type Options struct {
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -48,5 +46,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}


@@ -19,7 +19,7 @@ var (
)
func prepare(t *testing.T, root string) {
config.LoadConfig(context.Background())
config.LoadConfig()
// Configure the remote
config.FileSet(remoteName, "type", "alias")
@@ -54,22 +54,21 @@ func TestNewFS(t *testing.T) {
{"four/under four.txt", 9, false},
}},
{"four", "..", "", true, []testEntry{
{"five", -1, true},
{"under four.txt", 9, false},
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}},
{"", "../../three", "", true, []testEntry{
{"four", "../three", "", true, []testEntry{
{"underthree.txt", 9, false},
}},
{"four", "../../five", "", true, []testEntry{
{"underfive.txt", 6, false},
}},
} {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what)
prepare(t, remoteRoot)
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList)
require.NoError(t, err, what)
@@ -91,7 +90,7 @@ func TestNewFS(t *testing.T) {
func TestNewFSNoRemote(t *testing.T) {
prepare(t, "")
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
@@ -99,7 +98,7 @@ func TestNewFSNoRemote(t *testing.T) {
func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)


@@ -9,16 +9,13 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -34,14 +31,11 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"
)


@@ -70,13 +70,29 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
@@ -127,7 +143,7 @@ underlying S3 storage.`,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -144,7 +160,6 @@ type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
ci *fs.ConfigInfo // global config
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
@@ -154,7 +169,7 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
// Object describes an acd object
// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -214,7 +229,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
return true, err
@@ -240,7 +255,8 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -248,7 +264,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
root = parsePath(root)
baseClient := fshttp.NewClient(ctx)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
@@ -256,27 +272,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
c: c,
pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(ctx),
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
}).Fill(f)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -500,11 +514,15 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
maxTries := f.ci.LowLevelRetries
maxTries := fs.Config.LowLevelRetries
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
@@ -525,7 +543,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
default:
// ignore ASSET, etc.
// ignore ASSET etc
}
return false
})
@@ -647,7 +665,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -678,11 +696,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -700,6 +724,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
@@ -719,7 +747,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstObj fs.Object
srcErr, dstErr error
)
for i := 1; i <= f.ci.LowLevelRetries; i++ {
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
@@ -734,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// finished if src not found and dst found
break
}
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
time.Sleep(1 * time.Second)
}
return dstObj, dstErr
@@ -747,7 +775,7 @@ func (f *Fs) DirCacheFlush() {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -769,24 +797,54 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src parent
_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
findPath = srcRemote
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
}
if err != nil {
return err
}
@@ -832,6 +890,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -895,7 +957,7 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -923,8 +985,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// ------------------------------------------------------------
@@ -975,7 +1037,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.14
// +build !plan9,!solaris
package azureblob
@@ -16,20 +16,3 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}
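The increment helper exercised by this table-driven test is not shown in the hunk; a minimal implementation consistent with all of the cases above is a little-endian add-with-carry (a sketch, not necessarily the exact source):

func increment(xs []byte) {
	// Add one to the little-endian counter, carrying into the next
	// byte on overflow; wraps to all zeros when every byte is 0xFF.
	for i := range xs {
		xs[i]++
		if xs[i] != 0 {
			break
		}
	}
}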


@@ -1,16 +1,14 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.14
// +build !plan9,!solaris
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
// TestIntegration runs integration tests against the remote
@@ -29,36 +27,11 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}


@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.14
// +build plan9 solaris
package azureblob


@@ -1,137 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}
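As a usage illustration only (a hedged sketch assuming it runs inside this package, since userMSI and msiClientID are unexported; the client ID is a hypothetical placeholder):

ctx := context.Background()
identity := &userMSI{Type: msiClientID, Value: "11111111-2222-3333-4444-555555555555"} // hypothetical ID
token, err := GetMSIToken(ctx, identity)
if err != nil {
	fs.Debugf(nil, "MSI token unavailable: %v", err)
} else {
	fs.Debugf(nil, "got MSI access token, expires on %v", token.ExpiresOn)
}
// Passing a nil identity requests the system-assigned identity instead.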


@@ -1,117 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -337,11 +337,3 @@ type CopyFileRequest struct {
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}


@@ -33,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
)
@@ -44,10 +43,8 @@ const (
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
idHeader = "X-Bz-File-Id"
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
@@ -57,9 +54,6 @@ const (
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -119,16 +113,6 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
@@ -140,13 +124,8 @@ minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
@@ -155,9 +134,7 @@ to start uploading.`,
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
This is probably only useful for a public bucket.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
@@ -168,18 +145,6 @@ The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -203,13 +168,10 @@ type Options struct {
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -218,7 +180,6 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
@@ -233,8 +194,7 @@ type Fs struct {
uploads map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
bufferTokens chan []byte // control concurrency of multipart uploads
}
// Object describes a b2 object
@@ -295,7 +255,7 @@ func (o *Object) split() (bucket, bucketPath string) {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
401, // Unauthorized (eg "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -370,6 +330,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
@@ -396,17 +357,14 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < opt.ChunkSize {
opt.UploadCutoff = opt.ChunkSize
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
@@ -424,24 +382,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -449,13 +398,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
}).Fill(f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
f.fillBufferTokens()
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
@@ -479,9 +429,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -575,25 +528,32 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
buf = f.pool.Get()
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, f.opt.ChunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
}
// putBuf returns a buffer to the memory pool and an upload token
//
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(f.opt.ChunkSize) {
panic("bad blocksize returned to pool")
}
f.uploadToken.Put()
// fs.Debugf(f, "Returning upload block %p", buf)
f.bufferTokens <- buf
}
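The code on the old side of this hunk is a classic Go bounded-buffer pattern: a buffered channel pre-filled with one nil slot per transfer acts both as a concurrency token and as a recycling bin for chunk buffers. A self-contained sketch of the same pattern (names are hypothetical, not rclone APIs):

package main

import "fmt"

type bufPool struct {
	tokens    chan []byte
	chunkSize int
}

func newBufPool(workers, chunkSize int) *bufPool {
	p := &bufPool{tokens: make(chan []byte, workers), chunkSize: chunkSize}
	for i := 0; i < workers; i++ {
		p.tokens <- nil // a slot whose buffer is allocated lazily
	}
	return p
}

func (p *bufPool) get() []byte {
	buf := <-p.tokens // blocks while all slots are in use
	if buf == nil {
		buf = make([]byte, p.chunkSize)
	}
	return buf
}

func (p *bufPool) put(buf []byte) {
	p.tokens <- buf[:cap(buf)] // recycle the buffer at full capacity
}

func main() {
	p := newBufPool(4, 96<<20)
	buf := p.get()
	fmt.Println("got a chunk buffer of", len(buf), "bytes")
	p.put(buf)
}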
// Return an Object from a path
@@ -708,8 +668,8 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory && len(remote) > 1 {
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -1150,8 +1110,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir)
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
if bucket == "" {
return errors.New("can't purge from root")
}
@@ -1175,10 +1134,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, f.ci.Transfers)
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
var wg sync.WaitGroup
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1190,7 +1149,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
tr.Done(err)
}
}()
}
@@ -1218,7 +1177,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
toBeDeleted <- object
}
last = remote
tr.Done(ctx, nil)
tr.Done(nil)
}
return nil
}))
@@ -1226,79 +1185,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
wg.Wait()
if !oldOnly {
checkErr(f.Rmdir(ctx, dir))
checkErr(f.Rmdir(ctx, ""))
}
return errReturn
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
}
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx)
if err != nil {
return err
}
}
up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
if err != nil {
return err
}
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID,
}
if newInfo == nil {
request.MetadataDirective = "COPY"
} else {
request.MetadataDirective = "REPLACE"
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(&response)
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -1308,21 +1210,47 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj, nil)
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return nil, err
}
return dstObj, nil
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.opt.Enc.FromStandardPath(dstPath),
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
if err != nil {
return nil, err
}
return o, nil
}
// Hashes returns the supported hash sets.
@@ -1365,7 +1293,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
// PublicLink returns a link for downloading without account
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
@@ -1442,21 +1370,6 @@ func (o *Object) Size() int64 {
return o.size
}
// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}
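For example, with a hypothetical digest, cleanSHA1 lower-cases and then strips the prefix:

cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B")
// => "3f786850e387550fdab836ed7e6dc881de23001b"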
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1472,7 +1385,12 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
o.sha1 = cleanSHA1(o.sha1)
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
const unverified = "unverified:"
if strings.HasPrefix(o.sha1, unverified) {
o.sha1 = o.sha1[len(unverified):]
}
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
@@ -1501,11 +1419,8 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the class B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
@@ -1538,19 +1453,6 @@ func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err er
return info, nil
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// If using versions and have a version suffix, need to list the directory to find the correct versions
if o.fs.opt.Versions {
timestamp, _ := api.RemoveVersion(o.remote)
if !timestamp.IsZero() {
return o.getMetaDataListing(ctx)
}
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1609,10 +1511,28 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return err
}
_, bucketPath := o.split()
info.Info[timeKey] = timeString(modTime)
// Copy to the same name, overwriting the metadata only
return o.fs.copy(ctx, o, o, info)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: o.fs.opt.Enc.FromStandardPath(bucketPath), // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
}
// Storable returns if this object is storable
@@ -1680,11 +1600,12 @@ func (file *openFile) Close() (err error) {
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: method,
Options: options,
NoResponse: method == "HEAD",
Method: "GET",
Options: options,
}
// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1695,74 +1616,43 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set and not using DownloadURL otherwise by name
if o.id != "" && o.fs.opt.DownloadURL == "" {
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
// 404 for files, 400 for directories
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
return nil, errors.Wrap(err, "failed to open for download")
}
// NB resp may be Open here - don't return err != nil without closing
// Convert the Headers into an api.File
var uploadTimestamp api.Timestamp
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
if err != nil {
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
}
var Info = make(map[string]string)
for k, vs := range resp.Header {
k = strings.ToLower(k)
for _, v := range vs {
if strings.HasPrefix(k, headerPrefix) {
Info[k[len(headerPrefix):]] = v
}
}
}
info = &api.File{
ID: resp.Header.Get(idHeader),
Name: resp.Header.Get(nameHeader),
Action: "upload",
Size: resp.ContentLength,
UploadTimestamp: uploadTimestamp,
SHA1: resp.Header.Get(sha1Header),
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
return resp, info, nil
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
resp, info, err := o.getOrHead(ctx, "GET", options)
if err != nil {
return nil, err
}
// Don't check length or hash or metadata on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
err = o.decodeMetaData(info)
// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}
@@ -1817,8 +1707,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getBuf(false)
buf := o.fs.getUploadBlock()
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
@@ -1828,24 +1717,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
o.fs.putBuf(buf, false)
o.fs.putUploadBlock(buf)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putBuf(buf, false)
defer o.fs.putUploadBlock(buf)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
return err
}
@@ -1929,7 +1816,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
Options: options,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),


@@ -20,9 +20,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
type hashAppendingReader struct {
@@ -70,26 +68,20 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
doCopy bool // doing copy rather than upload
what string // text name of operation for logs
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -97,8 +89,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
@@ -107,6 +99,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -117,24 +110,18 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
if newInfo == nil {
modTime := src.ModTime(ctx)
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
} else {
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
@@ -144,24 +131,18 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if err != nil {
return nil, err
}
up = &largeUpload{
f: f,
o: o,
doCopy: doCopy,
what: "upload",
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
if doCopy {
up.what = "copy"
up.src = src.(*Object)
} else {
up.in, up.wrap = accounting.UnWrap(in)
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
f: f,
o: o,
in: in,
wrap: wrap,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
}
return up, nil
}
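The parts arithmetic above is a ceiling division of size by chunk size; checking it with hypothetical numbers:

package main

import "fmt"

func main() {
	size := int64(250 << 20) // a hypothetical 250 MiB upload
	chunk := int64(96 << 20) // the 96 MiB default chunk size
	parts := size / chunk
	if size%chunk != 0 {
		parts++
	}
	fmt.Println(parts) // 3: two full 96 MiB parts plus a final 58 MiB part
}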
@@ -203,6 +184,13 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
@@ -275,41 +263,9 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done copying chunk %d", part)
}
return err
}
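The Range header built above is inclusive at both ends; with hypothetical values, part 3 at a 100-byte chunk size covers bytes 200 through 299:

package main

import "fmt"

func main() {
	chunkSize := int64(100)
	part, partSize := int64(3), int64(100)
	offset := (part - 1) * chunkSize // where this part starts in the source
	fmt.Printf("bytes=%d-%d\n", offset, offset+partSize-1) // bytes=200-299
}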
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
@@ -331,7 +287,6 @@ func (up *largeUpload) finish(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -344,139 +299,139 @@ func (up *largeUpload) cancel(ctx context.Context) error {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
}
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish(ctx)
}
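The errs channel threaded through managedTransferChunk and finishOrCancelOnError is a first-error-wins mailbox: capacity one, with a non-blocking send so later failures are simply dropped. A self-contained sketch of that pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	errs := make(chan error, 1) // records only the first error
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i%2 == 1 { // pretend the odd workers fail
				select {
				case errs <- fmt.Errorf("worker %d failed", i):
				default: // an error is already recorded; drop this one
				}
			}
		}(i)
	}
	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("first error:", err)
	default:
		fmt.Println("all workers succeeded")
	}
}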
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
return nil
})
err = g.Wait()
if err != nil {
return err
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
return up.finishOrCancelOnError(ctx, err, errs)
}
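The chunk-reading loop in Stream leans on io.ReadFull's error contract: io.ErrUnexpectedEOF marks a short final chunk, while io.EOF means the previous chunk consumed the last byte exactly. A self-contained sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	in := bytes.NewReader([]byte("hello world")) // 11 bytes of input
	buf := make([]byte, 4)                       // 4-byte "chunks"
	for part := 1; ; part++ {
		n, err := io.ReadFull(in, buf)
		switch err {
		case nil:
			fmt.Printf("part %d: %q\n", part, buf[:n])
		case io.ErrUnexpectedEOF:
			fmt.Printf("part %d: %q (short, so the last)\n", part, buf[:n])
			return
		case io.EOF:
			fmt.Println("previous chunk was the last")
			return
		default:
			fmt.Println("read error:", err)
			return
		}
	}
}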
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
func (up *largeUpload) Upload(ctx context.Context) error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
return nil
})
err = g.Wait()
if err != nil {
return err
reqSize := remaining
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
remaining -= reqSize
}
return up.finish(ctx)
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
}


@@ -222,23 +222,3 @@ type AppAuth struct {
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
// User is returned from /users/me
type User struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
CreatedAt time.Time `json:"created_at"`
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}


@@ -26,7 +26,6 @@ import (
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
@@ -54,7 +53,8 @@ const (
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
@@ -84,36 +84,46 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -149,31 +159,10 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
@@ -196,6 +185,7 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
@@ -246,8 +236,6 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote box
@@ -299,7 +287,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a box 'url'
// parsePath parses an box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -330,7 +318,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -339,7 +327,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
if item.Name == leaf {
info = item
return true
}
@@ -372,7 +360,8 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -385,60 +374,32 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
root = parsePath(root)
client := fshttp.NewClient(ctx)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// If using an accessToken, set the Authorization header
if f.opt.AccessToken != "" {
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
}
// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -463,7 +424,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
return nil, err
}
f.features.Fill(ctx, &tempF)
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -514,7 +475,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if strings.EqualFold(item.Name, leaf) {
if item.Name == leaf {
pathIDOut = item.ID
return true
}
@@ -631,6 +592,10 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -671,7 +636,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -727,7 +692,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -752,6 +723,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -791,7 +766,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -856,8 +831,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// move a file or folder
@@ -885,31 +860,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
return info, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/users/me",
}
var user api.User
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(user.SpaceUsed), // bytes in use
Total: fs.NewUsageValue(user.SpaceAmount), // bytes total
Free: fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
}
return usage, nil
}
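Where this About implementation exists, its output is what surfaces through rclone's generic about command, so e.g. rclone about mybox: (remote name hypothetical) reports the used, total and free figures computed here.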
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -945,7 +896,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -958,14 +909,64 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -974,7 +975,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts
if err == nil {
@@ -1013,66 +1014,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
return info.SharedLink.URL, err
}
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",
NoResponse: true,
}
if itemType == api.ItemTypeFile {
opts.Path = "/files/" + id + "/trash"
} else {
opts.Path = "/folders/" + id + "/trash"
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
return
}
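Where this CleanUp implementation exists, it is what rclone's generic cleanup command dispatches to, so something like rclone cleanup mybox: (remote name hypothetical) permanently empties the Box trash, paging through it listChunks entries at a time as shown above.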
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1226,7 +1167,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
@@ -1245,7 +1186,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
Options: options,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
@@ -1272,26 +1212,24 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime(ctx)
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
err = o.upload(ctx, in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
}
return err
}
@@ -1312,12 +1250,10 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)


@@ -1,4 +1,4 @@
// multipart upload for box
// multpart upload for box
package box
@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
)
@@ -55,7 +54,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -65,7 +64,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
Options: options,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
@@ -173,7 +171,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
@@ -183,13 +181,15 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer atexit.OnError(&err, func() {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
})()
}()
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
@@ -236,7 +236,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {

backend/cache/cache.go

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -65,10 +65,9 @@ func init() {
Name: "cache",
Description: "Cache a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
@@ -87,7 +86,7 @@ func init() {
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server",
Help: "Skip all certificate verifications when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -109,7 +108,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
}},
}, {
Name: "info_age",
Help: `How long to cache file structure information (directory listings, file size, times, etc.).
Help: `How long to cache file structure information (directory listings, file size, times etc).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
Default: DefCacheInfoAge,
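For illustration only, a cache remote honouring this option might be configured along these lines in rclone.conf (remote and path names are hypothetical):

[mycache]
type = cache
remote = mydrive:media
info_age = 48h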
@@ -339,8 +338,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// NewFs constructs a Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -361,10 +360,15 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := fspath.JoinRootPath(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -385,7 +389,6 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
cleanupChan: make(chan bool, 1),
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
@@ -479,7 +482,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
@@ -506,16 +509,19 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
DuplicateFiles: false, // storage doesn't permit this
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
// override only those features that use a temp fs and it doesn't support them
//f.features.ChangeNotify = f.ChangeNotify
if f.opt.TempWritePath != "" {
if f.tempFs.Features().Copy == nil {
f.features.Copy = nil
}
if f.tempFs.Features().Move == nil {
f.features.Move = nil
}
@@ -581,7 +587,7 @@ Some valid examples are:
"0:10" -> the first ten chunks
Any parameter with a key that starts with "file" can be used to
specify files to fetch, e.g.
specify files to fetch, eg
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
@@ -1236,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
@@ -1517,7 +1523,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, do)
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
@@ -1526,9 +1532,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
return nil, fs.ErrorCantCopy
}
if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
return nil, fs.ErrorCantCopy
}
// the source must be a cached object or we abort
srcObj, ok := src.(*Object)
if !ok {
@@ -1594,7 +1597,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
return co, nil
}
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
@@ -1698,20 +1701,17 @@ func (f *Fs) Hashes() hash.Set {
return f.Fs.Hashes()
}
// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if dir == "" {
// FIXME this isn't quite right as it should purge the dir prefix
fs.Infof(f, "purging cache")
f.cache.Purge()
}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
return nil
}
err := do(ctx, dir)
err := do(ctx)
if err != nil {
return err
}
@@ -1828,19 +1828,6 @@ func (f *Fs) isRootInPath(p string) bool {
return strings.HasPrefix(p, f.Root()+"/")
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
for _, dir := range dirs {
_ = f.cache.RemoveDir(dir.Remote())
}
return do(ctx, dirs)
}
// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
_ = f.cache.RemoveDir("")
@@ -1895,41 +1882,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
Short: "Print stats on the cache backend in JSON format.",
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
default:
return nil, fs.ErrorCommandNotFound
}
}
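Where this Command implementation exists, it is reached through rclone's generic backend command, so e.g. rclone backend stats mycache: (remote name hypothetical) prints the cache stats as JSON, as described in commandHelp above.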
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1947,7 +1899,4 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
)


@@ -1,5 +1,4 @@
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test
@@ -16,7 +15,9 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
@@ -30,10 +31,12 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -49,7 +52,9 @@ const (
var (
remoteName string
mountDir string
uploadDir string
useMount bool
runInstance *run
errNotSupported = errors.New("not supported")
decryptedToEncryptedRemotes = map[string]string{
@@ -85,7 +90,9 @@ var (
func init() {
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
}
// TestMain drives the tests
@@ -93,7 +100,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
log.Printf("Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -266,8 +273,32 @@ func TestInternalObjNotFound(t *testing.T) {
require.Nil(t, obj)
}
func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
if !runInstance.useMount {
t.Skip("test needs mount mode")
}
id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
require.NoError(t, err)
require.Equal(t, "test content", string(data))
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -311,7 +342,6 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -661,6 +691,79 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.useMount {
t.Skipf("needs mount")
}
if !runInstance.wrappedIsExternal {
t.Skipf("needs drive")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached directory cleared")
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -808,9 +911,15 @@ func TestInternalBug2117(t *testing.T) {
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
mntDir string
tmpUploadDir string
useMount bool
isMounted bool
rootIsCrypt bool
wrappedIsExternal bool
unmountFn func() error
unmountRes chan error
vfs *vfs.VFS
tempFiles []*os.File
dbPath string
chunkPath string
@@ -820,7 +929,9 @@ type run struct {
func newRun() *run {
var err error
r := &run{
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
useMount: useMount,
isMounted: false,
}
// Read in all the defaults for all the options
@@ -833,6 +944,32 @@ func newRun() *run {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
return nil
}
} else {
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Print("Couldn't find free drive letter for test")
found:
r.mntDir = drive
}
} else {
r.mntDir = mountDir
}
log.Printf("Mount Dir: %v", r.mntDir)
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
@@ -872,15 +1009,6 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
return nil, nil
}
// Config to pass to NewFs
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// if the remote doesn't exist, create a new one with a local one for it
// identify which is the cache remote (it can be wrapped by a crypt too)
rootIsCrypt := false
@@ -889,8 +1017,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
localRemote := remote + "-local"
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
config.FileSet(remote, "type", "cache")
config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
} else {
remoteType := config.FileGet(remote, "type", "")
if remoteType == "" {
@@ -900,8 +1028,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
if remoteType != "cache" {
if remoteType == "crypt" {
rootIsCrypt = true
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
config.FileSet(remote, "password", cryptPassword1)
config.FileSet(remote, "password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote", "")
if remoteRemote == "" {
@@ -925,15 +1053,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
ci := fs.GetConfig(context.Background())
ci.LowLevelRetries = 1
fs.Config.LowLevelRetries = 1
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(context.Background(), remote, id, m)
f, err := cache.NewFs(remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -947,21 +1082,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
}
if purge {
_ = f.Features().Purge(context.Background(), "")
_ = f.Features().Purge(context.Background())
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
}
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
if r.useMount && r.isMounted {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
cfs.StopBackgroundRunners()
if r.useMount && runtime.GOOS != "windows" {
err = os.RemoveAll(r.mntDir)
require.NoError(t, err)
}
err = os.RemoveAll(r.tmpUploadDir)
require.NoError(t, err)
@@ -992,6 +1139,23 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
return f
}
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
r.writeRemoteBytes(t, f, remote, testData)
return remote
}
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
return r.writeObjectBytes(t, f, remote, testData)
}
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
r.writeRemoteBytes(t, f, remote, []byte(content))
}
@@ -1001,11 +1165,37 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
}
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
r.writeObjectBytes(t, f, remote, data)
var err error
if r.useMount {
err = r.retryBlock(func() error {
return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
}, 3, time.Second*3)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectBytes(t, f, remote, data)
}
}
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
r.writeObjectReader(t, f, remote, in)
defer func() {
_ = in.Close()
}()
if r.useMount {
out, err := os.Create(path.Join(r.mntDir, remote))
require.NoError(t, err)
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectReader(t, f, remote, in)
}
}
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1022,6 +1212,10 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
}
return obj
}
@@ -1029,16 +1223,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
var err error
var obj fs.Object
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
if r.useMount {
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
_, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
}
require.NoError(t, err)
return obj
@@ -1048,12 +1252,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, end int64, noLengthCheck bool) ([]byte, error) {
size := end - offset
checkSample := make([]byte, size)
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
if r.useMount {
f, err := os.Open(path.Join(r.mntDir, remote))
defer func() {
_ = f.Close()
}()
if err != nil {
return checkSample, err
}
_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
@@ -1076,19 +1298,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
}
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
err := f.Mkdir(context.Background(), remote)
var err error
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
}
require.NoError(t, err)
}
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
var err error
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
if r.useMount {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
err = obj.Remove(context.Background())
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
err = obj.Remove(context.Background())
}
}
return err
@@ -1097,14 +1328,42 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
}
return l, err
}
func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
var err error
var l []string
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll.Name())
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
}
require.NoError(t, err)
return l
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
@@ -1129,7 +1388,13 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().DirMove != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
@@ -1145,7 +1410,13 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Move != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1165,7 +1436,13 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Copy != nil {
if r.useMount {
err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1185,6 +1462,13 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return time.Time{}, err
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
@@ -1195,6 +1479,13 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return int64(0), err
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
@@ -1205,15 +1496,28 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
var err error
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() {
_ = f.Close()
r.vfs.WaitForWriters(10 * time.Second)
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
}
data1 := []byte(data + append)
reader := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), reader, objInfo1)
return err
}

backend/cache/cache_mount_other_test.go (new file)

@@ -0,0 +1,20 @@
// +build !linux !go1.11
// +build !darwin !go1.11
// +build !freebsd !go1.11
// +build !windows
package cache_test
import (
"testing"
"github.com/rclone/rclone/fs"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
panic("mountFs not defined for this platform")
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
panic("unmountFs not defined for this platform")
}

backend/cache/cache_mount_unix_test.go (new file)

@@ -0,0 +1,78 @@
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -0,0 +1,124 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
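// A typical call, as used by the unmount path below, waits for the
// mountpoint to vanish:
//
//	waitFor(func() bool { _, err := os.Stat(r.mntDir); return err != nil })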
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Errorf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't become available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,7 +1,6 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test
@@ -18,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})


@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 js
// +build plan9
package cache


@@ -1,5 +1,4 @@
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -101,6 +101,15 @@ func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
return time.Unix(0, d.CacheModTime)


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache


@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -16,10 +16,10 @@ import (
"sync"
"time"
bolt "github.com/etcd-io/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
)
// Constants
@@ -767,6 +767,31 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err
}
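// dumpRoot renders the bucket hierarchy under RootBucket as indented JSON,
// with leaf keys shown as "-"; it exists purely as a debugging aid.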
func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}
itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}
// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -980,6 +1005,15 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error {
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1027,6 +1061,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
})
}
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()


@@ -1,23 +0,0 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

File diff suppressed because it is too large


@@ -13,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
@@ -468,15 +467,9 @@ func testPreventCorruption(t *testing.T, f *Fs) {
return obj
}
billyObj := newFile("billy")
billyTxn := billyObj.(*Object).xactID
if f.useNoRename {
require.True(t, billyTxn != "")
} else {
require.True(t, billyTxn == "")
}
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -493,13 +486,11 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
_, err = f.base.NewObject(ctx, billyChunk4Name)
require.NoError(t, err)
_, err = f.NewObject(ctx, billyChunk4Name)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
@@ -528,8 +519,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyTxn := willyObj.(*Object).xactID
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -570,20 +560,17 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
}
return
return obj, filename
}
f.opt.FailHard = false
file, fileName, fileTxn := newFile(f, "wreaker")
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -662,7 +649,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
@@ -676,174 +663,6 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")
// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())
// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())
// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)
// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
func testBackwardsCompatibility(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't do norename transactions without metadata")
}
const dir = "backcomp"
ctx := context.Background()
saveOpt := f.opt
saveUseNoRename := f.useNoRename
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
f.useNoRename = saveUseNoRename
}()
f.opt.ChunkSize = fs.SizeSuffix(10)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(250)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
f.useNoRename = false
file, fileName := newFile(f, "renamefile")
f.opt.FailHard = false
item := fstest.NewItem(fileName, contents, modTime)
var items []fstest.Item
items = append(items, item)
f.useNoRename = true
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.NoError(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.NoError(t, err)
f.opt.FailHard = false
_ = file.Remove(ctx)
}
func testChunkerServerSideMove(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't test norename transactions without metadata")
}
ctx := context.Background()
const dir = "servermovetest"
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
assert.NoError(t, err)
fs1, isChunkerFs := subFs1.(*Fs)
assert.True(t, isChunkerFs)
fs1.useNoRename = false
fs1.opt.ChunkSize = fs.SizeSuffix(3)
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
assert.NoError(t, err)
fs2, isChunkerFs := subFs2.(*Fs)
assert.True(t, isChunkerFs)
fs2.useNoRename = true
fs2.opt.ChunkSize = fs.SizeSuffix(3)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
assert.NoError(t, err)
assert.Equal(t, int64(len(contents)), dstFile.Size())
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -867,15 +686,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
t.Run("BackwardsCompatibility", func(t *testing.T) {
testBackwardsCompatibility(t, f)
})
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -15,10 +15,10 @@ import (
// Command line flags
var (
// Invalid characters are not supported by some remotes, e.g. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, e.g. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)


@@ -1 +0,0 @@
test

File diff suppressed because it is too large.


@@ -1,65 +0,0 @@
// Test Compress filesystem interface
package compress
import (
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}
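The ExtraConfig items above stand in for a remote definition; a hand-written rclone.conf section equivalent to this test setup would look roughly like the following sketch (the tempdir path is illustrative):

[TestCompressGzip]
type = compress
remote = /tmp/rclone-compress-test-gzip
compression_mode = gzip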


@@ -71,6 +71,30 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData encrypts the data stream
EncryptData(io.Reader) (io.Reader, error)
// DecryptData decrypts the data stream
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypts the data stream at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
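As a rough usage sketch of this interface (the helper below is invented for illustration and is not part of the backend), a caller can round-trip a name through a Cipher like this:

// roundTripName is an illustrative helper, not part of the backend.
// Name encryption is deterministic, so the same input always produces
// the same ciphertext for a given key.
func roundTripName(c Cipher, name string) (bool, error) {
	enc := c.EncryptFileName(name)
	dec, err := c.DecryptFileName(enc) // fails if enc was not produced with this key
	if err != nil {
		return false, err
	}
	return dec == name, nil
}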
// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
@@ -112,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
type cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -125,8 +148,8 @@ type Cipher struct {
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &cipher{
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
@@ -147,9 +170,9 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers' lives
// slightly harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
func (c *cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
@@ -173,12 +196,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
func (c *cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
func (c *cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
@@ -217,13 +240,13 @@ func decodeFileName(in string) ([]byte, error) {
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
// Rogaway.
//
// This makes for deterministic encryption which is what we want - the
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
func (c *cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -233,7 +256,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
// decryptSegment decrypts a path segment
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -260,7 +283,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
}
// Simple obfuscation routines
func (c *Cipher) obfuscateSegment(plaintext string) string {
func (c *cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -347,7 +370,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
return result.String()
}
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -434,7 +457,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *Cipher) encryptFileName(in string) string {
func (c *cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/")
for i := range segments {
// Skip directory name encryption if the user chose to
@@ -452,7 +475,7 @@ func (c *Cipher) encryptFileName(in string) string {
}
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
func (c *cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
}
@@ -460,7 +483,7 @@ func (c *Cipher) EncryptFileName(in string) string {
}
// EncryptDirName encrypts a directory path
func (c *Cipher) EncryptDirName(in string) string {
func (c *cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in
}
@@ -468,7 +491,7 @@ func (c *Cipher) EncryptDirName(in string) string {
}
// decryptFileName decrypts a file path
func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/")
for i := range segments {
var err error
@@ -491,7 +514,7 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
}
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
func (c *cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -503,15 +526,14 @@ func (c *Cipher) DecryptFileName(in string) (string, error) {
}
// DecryptDirName decrypts a directory path
func (c *Cipher) DecryptDirName(in string) (string, error) {
func (c *cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil
}
return c.decryptFileName(in)
}
// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode
}
@@ -559,7 +581,7 @@ func (n *nonce) increment() {
n.carry(0)
}
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := 0; i < 8; i++ {
@@ -579,7 +601,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
mu sync.Mutex
in io.Reader
c *Cipher
c *cipher
nonce nonce
buf []byte
readBuf []byte
@@ -589,7 +611,7 @@ type encrypter struct {
}
// newEncrypter creates a new file handle encrypting on the fly
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{
in: in,
c: c,
@@ -633,8 +655,11 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// Write nonce to start of block
copy(fh.buf, fh.nonce[:])
// Encrypt the block using the nonce
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
block := fh.buf
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
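For context, the data format seals each block with NaCl secretbox, which is where the 16 byte per-block overhead comes from. A minimal standalone sketch of the Seal/Open round trip (independent of rclone; the key and nonce are zero here only for brevity):

package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte   // rclone derives this from the password (c.dataKey)
	var nonce [24]byte // rclone increments the nonce for each block
	sealed := secretbox.Seal(nil, []byte("hello"), &nonce, &key)
	fmt.Println(len(sealed)) // 5 + secretbox.Overhead (16) = 21
	opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
	fmt.Println(ok, string(opened)) // true hello
}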
@@ -658,19 +683,13 @@ func (fh *encrypter) finish(err error) (int, error) {
}
// Encrypt data encrypts the data stream
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil)
if err != nil {
return nil, nil, err
return nil, err
}
return wrap(out), out, nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
return wrap(out), nil // and wrap the accounting back on
}
// decrypter decrypts an io.ReaderCloser on the fly
@@ -679,7 +698,7 @@ type decrypter struct {
rc io.ReadCloser
nonce nonce
initialNonce nonce
c *Cipher
c *cipher
buf []byte
readBuf []byte
bufIndex int
@@ -690,7 +709,7 @@ type decrypter struct {
}
// newDecrypter creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{
rc: rc,
c: c,
@@ -718,7 +737,7 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
@@ -779,7 +798,8 @@ func (fh *decrypter) fillBuffer() (err error) {
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
block := fh.buf
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
return err // return pending error as it is likely more accurate
@@ -992,7 +1012,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}
// DecryptData decrypts the data stream
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc)
if err != nil {
return nil, err
@@ -1005,7 +1025,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
@@ -1014,7 +1034,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}
// EncryptedSize calculates the size of the data when encrypted
func (c *Cipher) EncryptedSize(size int64) int64 {
func (c *cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 {
@@ -1024,7 +1044,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
}
// DecryptedSize calculates the size of the data when decrypted
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
func (c *cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize)
if size < 0 {
return 0, ErrorEncryptedFileTooShort
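To make the size arithmetic concrete, here is the same formula with constants filled in from rclone's documented crypt file format (a 32 byte file header made of 8 bytes of magic plus a 24 byte nonce, 64 KiB data blocks, and 16 bytes of secretbox overhead per block); this is an illustrative restatement, not new code in the backend:

const (
	fileHeaderSize  = 32        // 8 byte magic + 24 byte nonce
	blockHeaderSize = 16        // secretbox (poly1305) overhead
	blockDataSize   = 64 * 1024 // plaintext payload per block
)

// encryptedSize mirrors Cipher.EncryptedSize above.
func encryptedSize(size int64) int64 {
	blocks, residue := size/blockDataSize, size%blockDataSize
	n := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {
		n += blockHeaderSize + residue
	}
	return n
}

// encryptedSize(1) == 49 and encryptedSize(64*1024) == 65584,
// matching the sizes given in the rclone crypt documentation.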
@@ -1043,6 +1063,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)


@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -785,7 +784,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in := &errorReader{io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
@@ -794,6 +793,14 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(32), n)
}
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct {
io.Reader
closed int
@@ -831,7 +838,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed)
}
er := &readers.ErrorReader{Err: errors.New("potato")}
er := &errorReader{errors.New("potato")}
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
@@ -857,7 +864,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in2 := &errorReader{io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
@@ -929,7 +936,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
assert.Equal(t, 0, n)
}
// Now try decoding it with an open/seek
for _, offset := range trials {
for _, limit := range limits {
if offset+limit > len(plaintext) {
@@ -1111,7 +1118,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1)
in2 := &readers.ErrorReader{Err: errors.New("potato")}
in2 := &errorReader{errors.New("potato")}
in := io.MultiReader(in1, in2)
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)


@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -27,10 +26,9 @@ func init() {
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -49,10 +47,8 @@ func init() {
},
},
}, {
Name: "directory_name_encryption",
Help: `Option to either encrypt directory names or leave them intact.
NB If filename_encryption is "off" then this option will do nothing.`,
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
@@ -73,20 +69,6 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
@@ -101,27 +83,12 @@ names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "no_data_encryption",
Help: "Option to either encrypt file data or leave it unencrypted.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Don't encrypt file data, leave it unencrypted.",
},
{
Value: "false",
Help: "Encrypt file data.",
},
},
}},
})
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (*Cipher, error) {
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
@@ -148,7 +115,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (*Cipher, error) {
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -159,7 +126,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -174,25 +141,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Make sure to remove trailing . referring to the current dir
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(ctx, remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(ctx, remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(ctx, remotePath)
}
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}
f := &Fs{
Fs: wrappedFs,
@@ -201,7 +167,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
opt: *opt,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
@@ -213,8 +178,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
return f, err
}
@@ -224,10 +188,8 @@ type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
NoDataEncryption bool `config:"no_data_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
}
@@ -239,7 +201,7 @@ type Fs struct {
root string
opt Options
features *fs.Features // optional features
cipher *Cipher
cipher Cipher
}
// Name of the remote (as passed into NewFs)
@@ -276,7 +238,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
*entries = append(*entries, f.newObject(obj))
}
// Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
@@ -362,12 +324,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}
// Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
@@ -391,7 +349,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
@@ -450,21 +408,21 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}
// Purge all files in the directory specified
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, f.cipher.EncryptDirName(dir))
return do(ctx)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -489,7 +447,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.newObject(oResult), nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -515,7 +473,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -544,11 +502,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
@@ -601,53 +559,18 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
if f.opt.NoDataEncryption {
return src.Hash(ctx, hashType)
}
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.newDecrypter(in)
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -672,7 +595,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read")
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
// Open the src for input
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// MergeDirs merges the contents of all the directories passed
@@ -699,7 +645,7 @@ func (f *Fs) DirCacheFlush() {
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
@@ -707,9 +653,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path
@@ -744,67 +690,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Object describes a wrapped Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -846,13 +731,9 @@ func (o *Object) Remote() string {
// Size returns the size of the file
func (o *Object) Size() int64 {
size := o.Object.Size()
if !o.f.opt.NoDataEncryption {
var err error
size, err = o.f.cipher.DecryptedSize(size)
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
@@ -870,10 +751,6 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if o.f.opt.NoDataEncryption {
return o.Object.Open(ctx, options...)
}
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -949,30 +826,18 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
nonce nonce
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
nonce: nonce,
}
}
@@ -998,23 +863,6 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil
}
@@ -1053,7 +901,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
@@ -1067,7 +914,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)


@@ -1,143 +0,0 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnWrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encrypter
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to run extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}


@@ -91,26 +91,3 @@ func TestObfuscate(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestNoDataObfuscate runs integration tests against the remote
func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

backend/drive/drive.go (1496 changed lines) Executable file → Normal file

File diff suppressed because it is too large.


@@ -7,21 +7,15 @@ import (
"io"
"io/ioutil"
"mime"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
@@ -196,7 +190,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -210,7 +204,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -274,192 +268,6 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}
t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -474,9 +282,6 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -77,10 +77,11 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
return false, err
}
var req *http.Request
req, err = http.NewRequestWithContext(ctx, method, urls, body)
req, err = http.NewRequest(method, urls, body)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -113,7 +114,8 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
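The two request-building styles in this hunk are equivalent: http.NewRequestWithContext arrived in Go 1.13 and folds the older NewRequest plus WithContext pattern into one call. A minimal sketch (the function name and arguments are placeholders; assumes the context, io and net/http imports):

func newPostRequest(ctx context.Context, url string, body io.Reader) (*http.Request, error) {
	// Pattern that works before Go 1.13:
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)

	// Equivalent single call on Go 1.13 and later:
	// req, err := http.NewRequestWithContext(ctx, "POST", url, body)
	return req, nil
}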

backend/dropbox/dropbox.go (676 changed lines) Executable file → Normal file

@@ -30,7 +30,6 @@ import (
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
@@ -87,21 +86,12 @@ const (
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
"files.content.read",
"sharing.write",
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
},
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
@@ -117,24 +107,8 @@ var (
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
return &config
}
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
@@ -142,19 +116,19 @@ func init() {
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
@@ -167,50 +141,10 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "impersonate",
Help: `Impersonate this user when using a business account.
Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to lookup a members email address into the internal ID that
dropbox uses in the API.
Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.
You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported.
All other operations will be disabled.`,
Default: false,
Advanced: true,
}, {
Name: "shared_folders",
Help: `Instructs rclone to work on shared folders.
When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of a shared folder. Rclone will then try to mount this
shared folder to the root namespace. On success rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders flag can be omitted after the first use of a particular
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -219,22 +153,20 @@ shared folder.`,
// as invalid characters.
// Testing revealed names with trailing spaces and the DEL character don't work.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: encoder.Base |
Default: (encoder.Base |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}}...),
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -242,10 +174,8 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv files.Client // the connection to the dropbox server
svc files.Client // the connection to the dropbox server (unauthorized)
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
@@ -259,9 +189,7 @@ type Fs struct {
//
// Dropbox Objects always have full metadata
type Object struct {
fs *Fs // what this object is part of
id string
url string
fs *Fs // what this object is part of
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
@@ -297,13 +225,7 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
@@ -339,7 +261,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -364,34 +286,27 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
cfg := dropbox.Config{
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(cfg)
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
@@ -406,71 +321,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(cfg)
f.svc = files.New(ucfg)
f.sharing = sharing.New(cfg)
f.users = users.New(cfg)
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: false,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
})
// do not fill features yet
if f.opt.SharedFiles {
f.setRoot(root)
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
}
return f, nil
}
if f.opt.SharedFolders {
f.setRoot(root)
if f.root == "" {
return f, nil // our root is empty so we probably want to list shared folders
}
dir := path.Dir(f.root)
if dir == "." {
dir = f.root
}
// root is not empty so we have to find the right shared folder if it exists
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
}
// we found the specified shared folder so let's mount it
// this will add it to the user's normal root namespace, allowing us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
return nil, err
}
default:
return nil, err
}
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the user's root namespace
// we disable shared folder mode and proceed normally
f.opt.SharedFolders = false
}
f.features.Fill(ctx, f)
}).Fill(f)
f.setRoot(root)
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
@@ -492,7 +354,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)
@@ -603,150 +464,9 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(remote, nil)
}
// listSharedFolders lists all available shared folders, mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
if !started {
arg := sharing.ListFoldersArgs{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFoldersContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
}
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
// findSharedFolder find the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}
// mountSharedFolder mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(err)
})
return err
}
// listReceivedFiles lists files the user has been given access to (note this
// means individual files, not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
if !started {
arg := sharing.ListFilesArg{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFilesContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
fmt.Printf("%+v\n", entry)
entryPath := entry.Name
o := &Object{
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
}
if err != nil {
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -757,13 +477,6 @@ func (f *Fs) findSharedFile(name string) (o *Object, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders()
}
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -827,7 +540,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Now())
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -850,9 +563,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.SharedFiles || f.opt.SharedFolders {
return nil, errNotSupportedInSharedMode
}
// Temporary Object under construction
o := &Object{
fs: f,
@@ -868,9 +578,6 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return errNotSupportedInSharedMode
}
root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root
@@ -890,10 +597,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
arg2 := files.CreateFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
}
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(arg2.Path); cErr != nil {
return cErr
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
return shouldRetry(err)
@@ -901,9 +604,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)
// can't remove root
@@ -911,33 +615,31 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return errors.New("can't remove root directory")
}
if check {
// check directory exists
_, err = f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
// check directory exists
_, err := f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
// remove it
@@ -948,22 +650,12 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return errNotSupportedInSharedMode
}
return f.purgeCheck(ctx, dir, true)
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -1020,11 +712,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
})
return shouldRetry(err)
})
return err
}
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -1076,17 +775,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
@@ -1129,7 +822,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1201,159 +894,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return usage, nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the passed channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartCursor early so all changes from now on get processed
startCursor, err := f.changeNotifyCursor()
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if startCursor == "" {
startCursor, err = f.changeNotifyCursor()
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
continue
}
}
fs.Debugf(f, "Checking for changes on remote")
startCursor, err = f.changeNotifyRunner(ctx, notifyFunc, startCursor)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyCursor() (cursor string, err error) {
var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" {
arg.Path = ""
}
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(err)
})
if err != nil {
return
}
return startCursor.Cursor, nil
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startCursor string) (newCursor string, err error) {
cursor := startCursor
var res *files.ListFolderLongpollResult
// Dropbox sets a timeout range of 30 - 480
timeout := uint64(f.ci.TimeoutOrInfinite() / time.Second)
if timeout < 30 {
timeout = 30
}
if timeout > 480 {
timeout = 480
}
err = f.pacer.Call(func() (bool, error) {
args := files.ListFolderLongpollArg{
Cursor: cursor,
Timeout: timeout,
}
res, err = f.svc.ListFolderLongpoll(&args)
return shouldRetry(err)
})
if err != nil {
return
}
if !res.Changes {
return cursor, nil
}
if res.Backoff != 0 {
fs.Debugf(f, "Waiting to poll for %d seconds", res.Backoff)
time.Sleep(time.Duration(res.Backoff) * time.Second)
}
for {
var changeList *files.ListFolderResult
arg := files.ListFolderContinueArg{
Cursor: cursor,
}
err = f.pacer.Call(func() (bool, error) {
changeList, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
}
cursor = changeList.Cursor
var entryType fs.EntryType
for _, entry := range changeList.Entries {
entryPath := ""
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
}
if entryPath != "" {
notifyFunc(entryPath, entryType)
}
}
if !changeList.HasMore {
break
}
}
return cursor, nil
}
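// Illustrative sketch (not part of the rclone source): driving the polling
// loop above from caller code. The callback body is hypothetical; the
// signature matches ChangeNotify as declared.
//
//	pollInterval := make(chan time.Duration)
//	f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
//		fs.Infof(nil, "change at %q (%v)", path, entryType)
//	}, pollInterval)
//	pollInterval <- 30 * time.Second // start polling every 30 seconds
//	// ... later
//	close(pollInterval) // stop being notified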
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
@@ -1379,16 +919,8 @@ func (o *Object) Remote() string {
return o.remote
}
// ID returns the object id
func (o *Object) ID() string {
return o.id
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return "", errNotSupportedInSharedMode
}
if t != DbHashType {
return "", hash.ErrUnsupported
}
@@ -1406,9 +938,8 @@ func (o *Object) Size() int64 {
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date
// This isn't a complete set of metadata and has an inacurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.id = info.Id
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash
@@ -1477,27 +1008,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
if len(options) != 0 {
return nil, errors.New("OpenOptions not supported for shared files")
}
arg := sharing.GetSharedLinkMetadataArg{
Url: o.url,
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return
}
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: o.id,
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1625,40 +1139,12 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
return entry, nil
}
// checks all the parts of name to see they are below
// maxFileNameLength runes.
//
// This checks the length as runes which isn't quite right as dropbox
// seems to encode some symbols (eg ☺) as two "characters". This seems
// like utf-16 except that ☺ doesn't need two characters in utf-16.
//
// Using runes instead of what dropbox is using will work for most
// cases, and when it goes wrong we will upload something we should
// have detected as too long which is the least damaging way to fail.
func checkPathLength(name string) (err error) {
for next := ""; len(name) > 0; name = next {
if slash := strings.IndexRune(name, '/'); slash >= 0 {
name, next = name[:slash], name[slash+1:]
} else {
next = ""
}
length := utf8.RuneCountInString(name)
if length > maxFileNameLength {
return fserrors.NoRetryError(fs.ErrorFileNameTooLong)
}
}
return nil
}
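// A minimal sketch of the rune-based check above. maxFileNameLength is
// defined elsewhere in this backend; the value 255 here is an assumption
// for illustration only.
//
//	checkPathLength(strings.Repeat("a", 255))          // nil
//	checkPathLength(strings.Repeat("☺", 256))          // fs.ErrorFileNameTooLong (wrapped as NoRetryError)
//	checkPathLength("ok/" + strings.Repeat("你", 256)) // fails: one path segment is too long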
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return errNotSupportedInSharedMode
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1667,10 +1153,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
}
size := src.Size()
var err error
@@ -1691,9 +1173,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return errNotSupportedInSharedMode
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1714,5 +1193,4 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)


@@ -1,44 +0,0 @@
package dropbox
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {
err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}


@@ -6,7 +6,6 @@ import (
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
@@ -18,7 +17,6 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -29,60 +27,11 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
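// Sketch of the effect of the special case above (hypothetical error value):
// an error whose text contains "Flood detected" causes a 30 second sleep
// before the normal fserrors checks decide whether to retry.
//
//	retry, err := shouldRetry(resp, errors.New("Flood detected: IP Locked #374"))
//	// sleeps ~30s first; retry then follows fserrors.ShouldRetry/ShouldRetryHTTP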
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}
func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}
var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
}
return &file, err
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
@@ -198,6 +147,11 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -343,56 +297,6 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}
response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}
response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
@@ -416,7 +320,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
@@ -434,7 +338,6 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{


@@ -23,19 +23,18 @@ import (
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(ctx context.Context, name string, config configmap.Mapper) {
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
@@ -167,7 +166,7 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -186,17 +185,16 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
}).Fill(ctx, f)
}).Fill(f)
client := fshttp.NewClient(ctx)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
@@ -204,6 +202,8 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -226,7 +226,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
}
return nil, err
}
f.features.Fill(ctx, &tempF)
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -263,7 +263,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -297,7 +297,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
@@ -305,10 +305,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -322,7 +322,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
if size > int64(100e9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
@@ -333,12 +333,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
@@ -363,6 +363,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
@@ -387,7 +388,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -395,6 +402,11 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -415,79 +427,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}
// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't copy file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)


@@ -4,11 +4,13 @@ package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})


@@ -72,10 +72,6 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
func (o *Object) setMetaData(file File) {
o.file = file
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
@@ -105,7 +101,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {


@@ -1,10 +1,5 @@
package fichier
// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
URL string `json:"url"`
}
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -54,39 +49,6 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}
// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"destination_folder_id"`
Rename string `json:"rename,omitempty"`
}
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"folder_id"`
Rename string `json:"rename,omitempty"`
}
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -124,6 +86,7 @@ type EndFileUploadResponse struct {
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`


@@ -1,391 +0,0 @@
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api
import (
"bytes"
"fmt"
"reflect"
"strings"
"time"
)
const (
// TimeFormat for parameters (UTC)
timeFormatParameters = `2006-01-02 15:04:05`
// "2020-08-11 10:10:04" for JSON parsing
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// filefabric API
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
return []byte(timeString), nil
}
var zeroTime = []byte(`"0000-00-00 00:00:00"`)
// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
// Set a Zero time.Time if we receive a zero time input
if bytes.Equal(data, zeroTime) {
*t = Time(time.Time{})
return nil
}
newT, err := time.Parse(timeFormatJSON, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
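// Example (a sketch using only the definitions above): round-tripping a
// value through the custom JSON handling.
//
//	var t Time
//	_ = t.UnmarshalJSON([]byte(`"2020-08-11 10:10:04"`)) // parsed as UTC
//	out, _ := t.MarshalJSON()                            // `"2020-08-11 10:10:04"`
//	_ = t.UnmarshalJSON(zeroTime)                        // time.Time(t).IsZero() == true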
// Status is returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}
// OK returns true if the status is all good
func (e *Status) OK() bool {
return e.Code == "ok"
}
// GetCode returns the status code if any
func (e *Status) GetCode() string {
return e.Code
}
// OKError defines an interface for items which can be OK or be an error
type OKError interface {
error
OK() bool
GetCode() string
}
// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
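// Sketch of how a caller might use the OKError contract; the helper name is
// hypothetical and not part of this API.
//
//	func checkStatus(resp OKError) error {
//		if resp.OK() {
//			return nil
//		}
//		return resp // Status satisfies error via Error()
//	}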
// EmptyResponse is response which just returns the error condition
type EmptyResponse struct {
Status
}
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
Status
Token string `json:"token"`
UserID string `json:"userid"`
AllowLoginRemember string `json:"allowloginremember"`
LastLogin Time `json:"lastlogin"`
AutoLoginCode string `json:"autologincode"`
}
// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
Status
Sitetitle string `json:"sitetitle"`
OauthLoginSupport string `json:"oauthloginsupport"`
IsAppliance string `json:"isappliance"`
SoftwareVersion string `json:"softwareversion"`
SoftwareVersionLabel string `json:"softwareversionlabel"`
}
// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
Status
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
Parents []Item `json:"parents"`
CustomPermissions CustomPermissions `json:"custompermissions"`
}
// ItemType determines whether it is a file or a folder
type ItemType uint8
// Types of things in Item
const (
ItemTypeFile ItemType = 0
ItemTypeFolder ItemType = 1
)
// Item is a File or a Folder
type Item struct {
ID string `json:"fi_id"`
PID string `json:"fi_pid"`
// UID string `json:"fi_uid"`
Name string `json:"fi_name"`
// S3Name string `json:"fi_s3name"`
// Extension string `json:"fi_extension"`
// Description string `json:"fi_description"`
Type ItemType `json:"fi_type,string"`
// Created Time `json:"fi_created"`
Size int64 `json:"fi_size,string"`
ContentType string `json:"fi_contenttype"`
// Tags string `json:"fi_tags"`
// MainCode string `json:"fi_maincode"`
// Public int `json:"fi_public,string"`
// Provider string `json:"fi_provider"`
// ProviderFolder string `json:"fi_providerfolder"` // folder
// Encrypted int `json:"fi_encrypted,string"`
// StructType string `json:"fi_structtype"`
// Bname string `json:"fi_bname"` // folder
// OrgID string `json:"fi_orgid"`
// Favorite int `json:"fi_favorite,string"`
// IspartOf string `json:"fi_ispartof"` // folder
Modified Time `json:"fi_modified"`
// LastAccessed Time `json:"fi_lastaccessed"`
// Hits int64 `json:"fi_hits,string"`
// IP string `json:"fi_ip"` // folder
// BigDescription string `json:"fi_bigdescription"`
LocalTime Time `json:"fi_localtime"`
// OrgfolderID string `json:"fi_orgfolderid"`
// StorageIP string `json:"fi_storageip"` // folder
// RemoteTime Time `json:"fi_remotetime"`
// ProviderOptions string `json:"fi_provideroptions"`
// Access string `json:"fi_access"`
// Hidden string `json:"fi_hidden"` // folder
// VersionOf string `json:"fi_versionof"`
Trash bool `json:"trash"`
// Isbucket string `json:"isbucket"` // filelist
SubFolders int64 `json:"subfolders"` // folder
}
// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
continue
}
if comma := strings.IndexRune(tag, ','); comma >= 0 {
tag = tag[:comma]
}
if tag == "" {
continue
}
tags = append(tags, tag)
}
return strings.Join(tags, "|"), nil
}
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
}
return tags
}
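// Reading off the live (non-commented) json tags of Item above, ItemFields
// should come out as:
//
//	"fi_id|fi_pid|fi_name|fi_type|fi_size|fi_contenttype|fi_modified|fi_localtime|trash|subfolders"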
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
Upload string `json:"upload"`
CreateSubFolder string `json:"createsubfolder"`
Rename string `json:"rename"`
Delete string `json:"delete"`
Move string `json:"move"`
ManagePermissions string `json:"managepermissions"`
ListOnly string `json:"listonly"`
VisibleInTrash string `json:"visibleintrash"`
}
// DoCreateNewFolderResponse is response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
Status
Item Item `json:"file"`
}
// DoInitUploadResponse is response from doInitUpload
type DoInitUploadResponse struct {
Status
ProviderID string `json:"providerid"`
UploadCode string `json:"uploadcode"`
FileType string `json:"filetype"`
DirectUploadSupport string `json:"directuploadsupport"`
ResumeAllowed string `json:"resumeallowed"`
}
// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
FileSize int64 `xml:"filesize" json:"filesize,string"`
MD5 string `xml:"md5" json:"md5"`
Success string `xml:"success" json:"success"`
}
// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
Status
UploadCode string `json:"uploadcode"`
Metafile string `json:"metafile"`
Percent int `json:"percent,string"`
Uploaded int64 `json:"uploaded,string"`
Size int64 `json:"size,string"`
Filename string `json:"filename"`
Nofile string `json:"nofile"`
Completed string `json:"completed"`
Completsuccess string `json:"completsuccess"`
Completerror string `json:"completerror"`
}
// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
Status
UploadedSize int64 `json:"uploadedsize,string"`
StorageIP string `json:"storageip"`
UploadedName string `json:"uploadedname"`
// Versioned []interface{} `json:"versioned"`
// VersionedID int `json:"versionedid"`
// Comment interface{} `json:"comment"`
File Item `json:"file"`
// UsSize string `json:"us_size"`
// PaSize string `json:"pa_size"`
// SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// Providers is returned as part of UploadResponse
type Providers struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Private string `json:"private"`
Limit string `json:"limit"`
Percent int `json:"percent"`
}
// Total is returned as part of UploadResponse
type Total struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Priused string `json:"priused"`
Primax string `json:"primax"`
Limit string `json:"limit"`
Percent int `json:"percent"`
Pripercent int `json:"pripercent"`
}
// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
Providers []Providers `json:"providers"`
Total Total `json:"total"`
}
// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
Response UploadResponse `json:"response"`
Status string `json:"status"`
}
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// FileResponse is returned from doRenameFile
type FileResponse struct {
Status
Item Item `json:"file"`
Exists string `json:"exists"`
}
// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
Status
Filesleft string `json:"filesleft"`
Addedtobackground string `json:"addedtobackground"`
Moved string `json:"moved"`
Item Item `json:"file"`
IDs []string `json:"fi_ids"`
Length int `json:"length"`
DirID string `json:"dir_id"`
MovedObjects []Item `json:"movedobjects"`
// FolderTasks []interface{} `json:"foldertasks"`
}
// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
Status
Tasks []Task `json:"tasks"`
Total string `json:"total"`
}
// BtData is part of TasksResponse
type BtData struct {
Callback string `json:"callback"`
}
// Task describes a task returned in TasksResponse
type Task struct {
BtID string `json:"bt_id"`
UsID string `json:"us_id"`
BtType string `json:"bt_type"`
BtData BtData `json:"bt_data"`
BtStatustext string `json:"bt_statustext"`
BtStatusdata string `json:"bt_statusdata"`
BtMessage string `json:"bt_message"`
BtProcent string `json:"bt_procent"`
BtAdded string `json:"bt_added"`
BtStatus string `json:"bt_status"`
BtCompleted string `json:"bt_completed"`
BtTitle string `json:"bt_title"`
BtCredentials string `json:"bt_credentials"`
BtHidden string `json:"bt_hidden"`
BtAutoremove string `json:"bt_autoremove"`
BtDevsite string `json:"bt_devsite"`
BtPriority string `json:"bt_priority"`
BtReport string `json:"bt_report"`
BtSitemarker string `json:"bt_sitemarker"`
BtExecuteafter string `json:"bt_executeafter"`
BtCompletestatus string `json:"bt_completestatus"`
BtSubtype string `json:"bt_subtype"`
BtCanceled string `json:"bt_canceled"`
Callback string `json:"callback"`
CanBeCanceled bool `json:"canbecanceled"`
CanBeRestarted bool `json:"canberestarted"`
Type string `json:"type"`
Status string `json:"status"`
Settings string `json:"settings"`
}

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
// Test filefabric filesystem interface
package filefabric_test
import (
"testing"
"github.com/rclone/rclone/backend/filefabric"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileFabric:",
NilObject: (*filefabric.Object)(nil),
})
}


@@ -5,34 +5,25 @@ import (
"context"
"crypto/tls"
"io"
"net"
"net/textproto"
"os"
"path"
"runtime"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
var (
currentUser = env.CurrentUser()
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -49,7 +40,7 @@ func init() {
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser,
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
@@ -59,19 +50,8 @@ func init() {
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS)
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
@@ -88,27 +68,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// The FTP protocal can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
@@ -126,31 +90,25 @@ type Options struct {
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
url string
user string
pass string
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
}
// Object describes an FTP file
@@ -190,83 +148,20 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Enable debugging output
type debugLog struct {
mu sync.Mutex
auth bool
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
//
// This writes debug info to the log
func (dl *debugLog) Write(p []byte) (n int, err error) {
dl.mu.Lock()
defer dl.mu.Unlock()
_, file, _, ok := runtime.Caller(1)
direction := "FTP Rx"
if ok && strings.Contains(file, "multi") {
direction = "FTP Tx"
}
lines := strings.Split(string(p), "\r\n")
if lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
for _, line := range lines {
if !dl.auth && strings.HasPrefix(line, "PASS") {
fs.Debugf(direction, "PASS *****")
continue
}
fs.Debugf(direction, "%q", line)
}
return len(p), nil
}
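// Example of the redaction above (sketch): a PASS command fed through the
// writer is logged with the password masked.
//
//	dl := &debugLog{auth: false}
//	_, _ = dl.Write([]byte("PASS secret\r\n")) // logged as: PASS *****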
type dialCtx struct {
f *Fs
ctx context.Context
}
// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -282,11 +177,10 @@ func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
accounting.LimitTPS(ctx)
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -296,11 +190,7 @@ func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error
if c != nil {
return c, nil
}
c, err = f.ftpConnection(ctx)
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
return c, err
return f.ftpConnection()
}
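// The Concurrency option is enforced above through a token dispenser:
// Get blocks until a slot is free and Put returns it. A minimal sketch
// of the same idea using a buffered channel as the semaphore (the real
// implementation is pacer.TokenDispenser):
package main

import (
	"fmt"
	"sync"
)

func main() {
	const concurrency = 2
	tokens := make(chan struct{}, concurrency)
	for i := 0; i < concurrency; i++ {
		tokens <- struct{}{} // preload one token per allowed connection
	}

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(op int) {
			defer wg.Done()
			<-tokens                                // Get: blocks once 2 ops are in flight
			defer func() { tokens <- struct{}{} }() // Put: release the slot
			fmt.Println("op", op, "holds a connection slot")
		}(i)
	}
	wg.Wait()
}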
// Return an FTP connection to the pool
@@ -313,13 +203,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
if pc == nil {
return
}
c := *pc
if c == nil {
return
}
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
@@ -335,34 +219,12 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.Quit(); cErr != nil {
err = cErr
}
f.pool[i] = nil
}
f.pool = nil
return err
}
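// A standalone sketch of the idle-timeout pattern drainPool is part of:
// a timer created with time.AfterFunc empties the pool, and every put
// Resets the timer, so draining only happens after a quiet period (this
// mirrors the idle_timeout option documented earlier). The pool here is
// a simplified stand-in for the FTP connection pool.
package main

import (
	"fmt"
	"sync"
	"time"
)

type pool struct {
	mu    sync.Mutex
	conns []int // simplified stand-in for []*ftp.ServerConn
	drain *time.Timer
}

func (p *pool) put(c int, idle time.Duration) {
	p.mu.Lock()
	p.conns = append(p.conns, c)
	p.drain.Reset(idle) // nudge the pool-emptying timer
	p.mu.Unlock()
}

func main() {
	idle := 100 * time.Millisecond
	p := &pool{}
	p.drain = time.AfterFunc(idle, func() {
		p.mu.Lock()
		fmt.Printf("closing %d unused connections\n", len(p.conns))
		p.conns = nil
		p.mu.Unlock()
	})
	p.put(1, idle)
	p.put(2, idle)
	time.Sleep(200 * time.Millisecond) // give the drain timer time to fire
}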
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -376,7 +238,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
user := opt.User
if user == "" {
user = currentUser
user = os.Getenv("USER")
}
port := opt.Port
if port == "" {
@@ -388,39 +250,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS {
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
url: u,
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
}).Fill(f)
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
@@ -447,12 +292,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
return f, err
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
@@ -497,7 +336,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -511,7 +350,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
@@ -533,7 +372,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote)
entry, err := f.findItem(remote)
if err != nil {
return nil, err
}
@@ -555,8 +394,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
}
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
@@ -577,7 +416,7 @@ func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err err
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -598,7 +437,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
timer := time.NewTimer(fs.Config.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
@@ -615,7 +454,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// doesn't exist, so check it really doesn't exist if no
// entries found.
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
exists, err := f.dirExists(dir)
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -668,7 +507,7 @@ func (f *Fs) Precision() time.Duration {
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
@@ -686,12 +525,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
@@ -718,12 +557,12 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
}
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(ctx context.Context, abspath string) error {
func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
fi, err := f.getInfo(ctx, abspath)
fi, err := f.getInfo(abspath)
if err == nil {
if fi.IsDir {
return nil
@@ -733,11 +572,11 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
err = f.mkdir(parent)
if err != nil {
return err
}
c, connErr := f.getFtpConnection(ctx)
c, connErr := f.getFtpConnection()
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
@@ -757,23 +596,23 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
func (f *Fs) mkParentDir(remote string) error {
parent := path.Dir(remote)
return f.mkdir(ctx, path.Join(f.root, parent))
return f.mkdir(path.Join(f.root, parent))
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(ctx, root)
return f.mkdir(root)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
@@ -789,11 +628,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(ctx, remote)
err := f.mkParentDir(remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "Move")
}
@@ -813,7 +652,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -830,7 +669,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.root, dstRemote)
// Check if destination exists
fi, err := f.getInfo(ctx, dstPath)
fi, err := f.getInfo(dstPath)
if err == nil {
if fi.IsDir {
return fs.ErrorDirExists
@@ -841,13 +680,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
err = f.mkdir(path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getFtpConnection(ctx)
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(err, "DirMove")
}
@@ -939,23 +778,19 @@ func (f *ftpReadCloser) Close() error {
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}
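// A sketch of the masking idiom above: FTP status codes surface as
// *textproto.Error, so a type switch can swallow just the codes that
// mean the transfer was deliberately cut short. The constant values
// mirror the ftp package (426, 550, and 150 for the pureftpd
// workaround) but are redeclared here to keep the sketch standalone.
package main

import (
	"fmt"
	"net/textproto"
)

const (
	statusTransfertAborted = 426
	statusFileUnavailable  = 550
	statusAboutToSend      = 150
)

func maskPrematureClose(err error) error {
	if e, ok := err.(*textproto.Error); ok {
		switch e.Code {
		case statusTransfertAborted, statusFileUnavailable, statusAboutToSend:
			return nil // expected when a read is closed early
		}
	}
	return err
}

func main() {
	err := &textproto.Error{Code: statusTransfertAborted, Msg: "transfer aborted"}
	fmt.Println(maskPrematureClose(err)) // <nil>
}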
@@ -979,7 +814,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)
c, err := o.fs.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "open")
}
@@ -1014,7 +849,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removed after failed upload: %v", err)
}
}
c, err := o.fs.getFtpConnection(ctx)
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Update")
}
@@ -1022,11 +857,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(ctx, path)
o.info, err = o.fs.getInfo(path)
if err != nil {
return errors.Wrap(err, "update getinfo")
}
@@ -1038,14 +872,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
info, err := o.fs.getInfo(ctx, path)
info, err := o.fs.getInfo(path)
if err != nil {
return err
}
if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote)
} else {
c, err := o.fs.getFtpConnection(ctx)
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Remove")
}
@@ -1061,6 +895,5 @@ var (
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
)


@@ -21,6 +21,7 @@ import (
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
@@ -37,7 +38,6 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -76,32 +76,33 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
if saFile != "" || saCreds != "" {
return
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
@@ -241,9 +242,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
@@ -255,7 +253,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -264,7 +262,6 @@ type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
@@ -370,12 +367,12 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -386,7 +383,8 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client
// Parse config into Options struct
@@ -404,21 +402,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.Anonymous {
oAuthClient = fshttp.NewClient(ctx)
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -432,7 +428,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -441,7 +437,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
}).Fill(f)
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -559,12 +555,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory {
if isDirectory && object.Size == 0 {
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -812,7 +808,7 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -840,27 +836,20 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
if rewriteResponse.Done {
break
}
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(rewriteResponse.Resource)
dstObj.setMetaData(newObject)
return dstObj, nil
}
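// The rewrite loop shown above follows the rewrite-token protocol of
// the GCS Objects.Rewrite API: a call may report Done=false together
// with a RewriteToken that must be passed back until the server
// finishes copying. A standalone sketch of that continuation loop,
// with doRewrite as a hypothetical stand-in for rewriteRequest.Do():
package main

import "fmt"

type rewriteResponse struct {
	Done                bool
	RewriteToken        string
	TotalBytesRewritten int64
}

var calls int

func doRewrite(token string) rewriteResponse {
	calls++
	if calls < 3 {
		return rewriteResponse{RewriteToken: fmt.Sprintf("t%d", calls), TotalBytesRewritten: int64(calls) << 20}
	}
	return rewriteResponse{Done: true, TotalBytesRewritten: 3 << 20}
}

func main() {
	token := ""
	for {
		resp := doRewrite(token)
		if resp.Done {
			fmt.Println("rewrite complete")
			break
		}
		token = resp.RewriteToken // resume where the server stopped
		fmt.Printf("continuing rewrite, %d bytes done\n", resp.TotalBytesRewritten)
	}
}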
@@ -1028,10 +1017,11 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
@@ -1073,35 +1063,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
case "x-goog-storage-class":
object.StorageClass = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
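// A compact sketch of the header-routing switch above: known headers
// map to dedicated object fields, and anything under the x-goog-meta-
// prefix becomes a custom metadata key. Plain maps stand in for
// storage.Object here.
package main

import (
	"fmt"
	"strings"
)

func main() {
	const googMetaPrefix = "x-goog-meta-"
	metadata := map[string]string{}
	contentType := ""

	headers := map[string]string{
		"Content-Type":       "image/png",
		"X-Goog-Meta-Origin": "scanner",
		"X-Unknown":          "ignored",
	}
	for key, value := range headers {
		lowerKey := strings.ToLower(key)
		switch {
		case lowerKey == "content-type":
			contentType = value
		case strings.HasPrefix(lowerKey, googMetaPrefix):
			metadata[lowerKey[len(googMetaPrefix):]] = value
		default:
			fmt.Printf("don't know how to set key %q on upload\n", key)
		}
	}
	fmt.Println(contentType, metadata) // image/png map[origin:scanner]
}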


@@ -30,7 +30,7 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -39,7 +39,7 @@ func TestAlbumsAdd(t *testing.T) {
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
@@ -49,8 +49,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2},
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -61,7 +61,7 @@ func TestAlbumsAdd(t *testing.T) {
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two"},
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
@@ -72,8 +72,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -86,7 +86,7 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
@@ -97,9 +97,9 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -114,8 +114,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
@@ -126,10 +126,10 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": {a0},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
@@ -146,8 +146,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
@@ -181,9 +181,9 @@ func TestAlbumsDel(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -198,16 +198,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
@@ -220,16 +220,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
@@ -240,16 +240,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
@@ -258,16 +258,16 @@ func TestAlbumsDel(t *testing.T) {
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
"one": {"sub"},
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
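// The test edits above are one mechanical rewrite applied throughout:
// inside a composite literal, Go infers the element type from the
// enclosing map type, so []string{...} and []*api.Album{...} values can
// be written as bare {...}. A standalone illustration:
package main

import "fmt"

func main() {
	// These two literals are identical; the second elides the element
	// type, which is inferred as []string from the map type.
	explicit := map[string][]string{"": []string{"one", "two"}}
	elided := map[string][]string{"": {"one", "two"}}
	fmt.Println(explicit, elided)
}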


@@ -17,7 +17,7 @@ type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies error interface
// Error statisfies error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}


@@ -21,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -78,7 +79,7 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -95,7 +96,7 @@ func init() {
}
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
err = oauthutil.Config("google photos", name, m, oauthConfig)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
@@ -109,7 +110,13 @@ func init() {
`)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -127,38 +134,14 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
their archive status.
With this flag, archived media are always visible in directory
listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}}...),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
}
// Fs represents a remote storage server
@@ -219,15 +202,6 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
func (f *Fs) includeArchived() bool {
return f.opt.IncludeArchived
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -250,10 +224,6 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -268,7 +238,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -276,8 +246,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -294,14 +264,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(ctx, f)
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
_, _, pattern := patterns.match(f.root, "", true)
@@ -310,7 +280,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var leaf string
f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/")
_, err := f.NewObject(ctx, leaf)
_, err := f.NewObject(context.TODO(), leaf)
if err == nil {
return f, fs.ErrorIsFile
}
@@ -519,12 +489,6 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
}
filter.PageSize = listChunks
filter.PageToken = ""
if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
if filter.Filters == nil {
filter.Filters = &api.Filters{}
}
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
}
lastID := ""
for {
var result api.MediaItems
@@ -979,9 +943,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Options: options,
Method: "POST",
Path: "/uploads",
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
@@ -1040,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}


@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs(ctx, "testfiles")
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the date/month/year hierarchy
// Check it is there in the date/month/year heirachy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)


@@ -23,8 +23,6 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
includeArchived() bool
}
// dirPattern describes a single directory pattern
@@ -54,7 +52,6 @@ var patterns = dirPatterns{
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
fs.NewDir(prefix+"feature", f.dirTime()),
}, nil
},
},
@@ -192,28 +189,6 @@ var patterns = dirPatterns{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
{
re: `^feature$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return fs.DirEntries{
fs.NewDir(prefix+"favorites", f.dirTime()),
}, nil
},
},
{
re: `^feature/favorites$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
filter := featureFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^feature/favorites/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
@@ -225,7 +200,7 @@ func (ds dirPatterns) mustCompile() dirPatterns {
return ds
}
// match finds the path passed in the matching structure and
// match finds the path passed in in the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
@@ -247,10 +222,11 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}
// Return the years from startYear to today
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ {
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
@@ -314,24 +290,6 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
return sf, nil
}
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
sf = api.SearchFilter{
Filters: &api.Filters{
FeatureFilter: &api.FeatureFilter{
IncludedFeatures: []string{
"FAVORITES",
},
},
},
}
return sf
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path


@@ -59,16 +59,6 @@ func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
// mock includeArchived for testing
func (f *testLister) includeArchived() bool {
return false
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
@@ -160,38 +150,6 @@ func TestPatternMatch(t *testing.T) {
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
{
root: "",
itemPath: "feature",
isFile: false,
wantMatch: []string{"feature"},
wantPrefix: "feature/",
wantPattern: &patterns[23],
},
{
root: "feature/favorites",
itemPath: "",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "",
wantPattern: &patterns[24],
},
{
root: "feature",
itemPath: "favorites",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "favorites/",
wantPattern: &patterns[24],
},
{
root: "feature/favorites",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[25],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)


@@ -1,320 +0,0 @@
// +build !plan9
package hdfs
import (
"context"
"fmt"
"io"
"os"
"os/user"
"path"
"strings"
"time"
"github.com/colinmarc/hdfs/v2"
krb "github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
)
// Fs represents a HDFS server
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
}
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
if configPath == "" {
configPath = "/etc/krb5.conf"
}
cfg, err := config.Load(configPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
if strings.Contains(ccachePath, ":") {
if strings.HasPrefix(ccachePath, "FILE:") {
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
} else {
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
}
} else if ccachePath == "" {
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
client, err := krb.NewFromCCache(ccache, cfg)
if err != nil {
return nil, err
}
return client, nil
}
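// A standalone sketch of the KRB5CCNAME resolution above: a bare path
// is used as-is, FILE:/path is stripped of its prefix, any other
// TYPE:value ccache (KEYRING:, DIR:, ...) is rejected, and an unset
// variable falls back to /tmp/krb5cc_<uid>.
package main

import (
	"fmt"
	"strings"
)

func resolveCCache(env, uid string) (string, error) {
	if strings.Contains(env, ":") {
		if strings.HasPrefix(env, "FILE:") {
			return strings.SplitN(env, ":", 2)[1], nil
		}
		return "", fmt.Errorf("unusable ccache: %s", env)
	}
	if env == "" {
		return fmt.Sprintf("/tmp/krb5cc_%s", uid), nil
	}
	return env, nil
}

func main() {
	for _, env := range []string{"", "FILE:/tmp/cc", "KEYRING:persistent:1000", "/tmp/cc2"} {
		path, err := resolveCCache(env, "1000")
		fmt.Printf("%q -> %q %v\n", env, path, err)
	}
}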
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
options := hdfs.ClientOptions{
Addresses: []string{opt.Namenode},
UseDatanodeHostname: false,
}
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
if opt.DataTransferProtection != "" {
options.DataTransferProtection = opt.DataTransferProtection
}
} else {
options.User = opt.Username
}
client, err := hdfs.NewClient(options)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
info, err := f.client.Stat(f.realpath(""))
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// NewObject finds file at remote or return fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
realpath := f.realpath(remote)
fs.Debugf(f, "new [%s]", realpath)
info, err := f.ensureFile(realpath)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
realpath := f.realpath(dir)
fs.Debugf(f, "list [%s]", realpath)
err = f.ensureDirectory(realpath)
if err != nil {
return nil, err
}
list, err := f.client.ReadDir(realpath)
if err != nil {
return nil, err
}
for _, x := range list {
stdName := f.opt.Enc.ToStandardName(x.Name())
remote := path.Join(dir, stdName)
if x.IsDir() {
entries = append(entries, fs.NewDir(remote, x.ModTime()))
} else {
entries = append(entries, &Object{
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
}
}
return entries, nil
}
// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
return f.client.MkdirAll(f.realpath(dir), 0755)
}
// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "rmdir [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
// do not remove empty directory
list, err := f.client.ReadDir(realpath)
if err != nil {
return err
}
if len(list) > 0 {
return fs.ErrorDirectoryNotEmpty
}
return f.client.Remove(realpath)
}
// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "purge [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
return f.client.RemoveAll(realpath)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}
func (f *Fs) ensureDirectory(realpath string) error {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return fs.ErrorDirNotFound
}
if err != nil {
return err
}
if !info.IsDir() {
return fs.ErrorDirNotFound
}
return nil
}
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return nil, fs.ErrorObjectNotFound
}
if err != nil {
return nil, err
}
if info.IsDir() {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
func (f *Fs) realpath(dir string) string {
return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)


@@ -1,86 +0,0 @@
// +build !plan9
package hdfs
import (
"path"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
func init() {
fsi := &fs.RegInfo{
Name: "hdfs",
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "hadoop name node and port",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "hadoop user name",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode
Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",
}},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
}},
}
fs.Register(fsi)
}
// Options for this backend
type Options struct {
Namenode string `config:"namenode"`
Username string `config:"username"`
ServicePrincipalName string `config:"service_principal_name"`
DataTransferProtection string `config:"data_transfer_protection"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// xPath make correct file path with leading '/'
func xPath(root string, tail string) string {
if !strings.HasPrefix(root, "/") {
root = "/" + root
}
return path.Join(root, tail)
}
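// A quick usage sketch of xPath: the root is normalised to a leading
// '/' before joining, so relative and absolute roots give the same
// result (the function body is copied from above).
package main

import (
	"fmt"
	"path"
	"strings"
)

func xPath(root string, tail string) string {
	if !strings.HasPrefix(root, "/") {
		root = "/" + root
	}
	return path.Join(root, tail)
}

func main() {
	fmt.Println(xPath("data", "file.txt"))  // /data/file.txt
	fmt.Println(xPath("/data", "file.txt")) // /data/file.txt
	fmt.Println(xPath("", "file.txt"))      // /file.txt
}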


@@ -1,20 +0,0 @@
// Test HDFS filesystem interface
// +build !plan9
package hdfs_test
import (
"testing"
"github.com/rclone/rclone/backend/hdfs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHdfs:",
NilObject: (*hdfs.Object)(nil),
})
}


@@ -1,6 +0,0 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
package hdfs


@@ -1,177 +0,0 @@
// +build !plan9
package hdfs
import (
"context"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
)
// Object describes an HDFS file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
realpath := o.fs.realpath(o.Remote())
err := o.fs.client.Chtimes(realpath, modTime, modTime)
if err != nil {
return err
}
o.modTime = modTime
return nil
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
realpath := o.realpath()
fs.Debugf(o.fs, "open [%s]", realpath)
f, err := o.fs.client.Open(realpath)
if err != nil {
return nil, err
}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
}
_, err = f.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
if limit != -1 {
in = readers.NewLimitedReadCloser(f, limit)
} else {
in = f
}
return in, err
}
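// A standalone sketch of the offset/limit handling in Open above: seek
// to the requested offset, then cap the stream when a range was asked
// for (-1 means read to the end). io.LimitReader stands in for
// readers.NewLimitedReadCloser.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func rangeReader(r io.ReadSeeker, offset, limit int64) (io.Reader, error) {
	if _, err := r.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}
	if limit != -1 {
		return io.LimitReader(r, limit), nil
	}
	return r, nil
}

func main() {
	src := strings.NewReader("hello hdfs range reads")
	r, err := rangeReader(src, 6, 4)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", b) // "hdfs"
}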
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
err := o.fs.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
info, err := o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
return err
}
}
out, err := o.fs.client.Create(realpath)
if err != nil {
return err
}
cleanup := func() {
rerr := o.fs.client.Remove(realpath)
if rerr != nil {
fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
}
}
_, err = io.Copy(out, in)
if err != nil {
cleanup()
return err
}
err = out.Close()
if err != nil {
cleanup()
return err
}
info, err = o.fs.client.Stat(realpath)
if err != nil {
return err
}
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return err
}
o.size = info.Size()
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
realpath := o.fs.realpath(o.remote)
fs.Debugf(o.fs, "remove [%s]", realpath)
return o.fs.client.Remove(realpath)
}
func (o *Object) realpath() string {
return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
)

View File

@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
@@ -115,9 +115,8 @@ type Options struct {
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
opt Options // options for this backend
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -146,7 +145,8 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -172,7 +172,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
client := fshttp.NewClient(ctx)
client := fshttp.NewClient(fs.Config)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
@@ -183,8 +183,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
req, err := http.NewRequest("HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -209,19 +210,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
}).Fill(f)
if isFile {
return f, fs.ErrorIsFile
}
@@ -390,10 +389,11 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -440,15 +440,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
checkers = f.ci.Checkers
in = make(chan string, checkers)
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < checkers; i++ {
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -545,10 +544,11 @@ func (o *Object) stat(ctx context.Context) error {
return nil
}
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
func (o *Object) Storable() bool {
return true
}
@@ -593,10 +593,11 @@ func (o *Object) Storable() bool {
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {


@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
config.LoadConfig(context.Background())
config.LoadConfig()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
f, err := NewFs(remoteName, "", m)
require.NoError(t, err)
return f, tidy
@@ -166,7 +166,8 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
@@ -214,7 +215,7 @@ func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
@@ -224,7 +225,7 @@ func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")


@@ -5,7 +5,7 @@ import (
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/ncw/swift"
"github.com/rclone/rclone/fs"
)
@@ -21,10 +21,10 @@ func newAuth(f *Fs) *auth {
}
}
// Request constructs an http.Request for authentication
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Reques
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
func (a *auth) Response(resp *http.Response) error {
return nil
}


@@ -4,7 +4,7 @@ package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
// to be revisted after some actual experience.
import (
"context"
@@ -16,10 +16,11 @@ import (
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -38,7 +39,7 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
"credentials.r", // Read Openstack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
@@ -56,22 +57,28 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
Token string `json:"token"` // Openstack token
Endpoint string `json:"endpoint"` // Openstack endpoint
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
}
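
As a hedged illustration (the JSON body is invented; the field tags are as above), the credentials document decodes with encoding/json and the Expires string parses as RFC 3339:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type credentials struct {
	Token    string `json:"token"`
	Endpoint string `json:"endpoint"`
	Expires  string `json:"expires"`
}

func main() {
	body := []byte(`{"token":"t0k","endpoint":"https://lb1.example.net/v1/AUTH_x","expires":"2015-11-09T14:24:56+01:00"}`)
	var c credentials
	if err := json.Unmarshal(body, &c); err != nil {
		panic(err)
	}
	expires, err := time.Parse(time.RFC3339, c.Expires) // matches the date format noted above
	fmt.Println(c.Endpoint, expires, err)
}
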
// Fs represents a remote hubic
@@ -110,10 +117,11 @@ func (f *Fs) String() string {
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -145,8 +153,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -156,14 +164,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
err = c.Authenticate(ctx)
err = c.Authenticate()
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
@@ -176,7 +183,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}


@@ -153,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Quota int64 `json:"quota"`
Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQuota int64 `json:"business_quota"`
BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -164,12 +164,6 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
@@ -386,7 +380,7 @@ type Error struct {
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {


@@ -11,7 +11,6 @@ import (
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -27,7 +26,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -41,37 +39,26 @@ import (
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configVersion = 1
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configVersion = 1
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
@@ -85,16 +72,18 @@ func init() {
// needs to be done early so we can use oauth during config
fs.Register(&fs.RegInfo{
Name: "jottacloud",
Description: "Jottacloud",
Description: "JottaCloud",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
}
refresh = (ver != configVersion) && (ver != v1configVersion)
refresh = ver != configVersion
}
if refresh {
@@ -109,19 +98,42 @@ func init() {
}
}
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
v2config(ctx, name, m)
case 2:
v1config(ctx, name, m)
case 3:
teliaCloudConfig(ctx, name, m)
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -129,13 +141,13 @@ func init() {
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Name: "unlink",
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
Default: false,
Advanced: true,
}, {
@@ -162,8 +174,8 @@ type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -217,7 +229,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a box 'url'
// parsePath parses an box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -239,224 +251,9 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
}
// v1config configures a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randomDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
opts := rest.Opts{
Method: "POST",
RootURL: v1registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
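
A hedged sketch of the token assembly at the end of doAuthV1 (the JSON field names are assumptions about the API response; oauth2.Token is from golang.org/x/oauth2): expires_in is a lifetime in seconds, so it is pinned to an absolute Expiry at parse time.

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

type tokenJSON struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	TokenType    string `json:"token_type"`
	ExpiresIn    int    `json:"expires_in"`
}

// toToken converts the raw JSON response into an oauth2.Token.
func toToken(j tokenJSON) oauth2.Token {
	return oauth2.Token{
		AccessToken:  j.AccessToken,
		RefreshToken: j.RefreshToken,
		TokenType:    j.TokenType,
		Expiry:       time.Now().Add(time.Duration(j.ExpiresIn) * time.Second),
	}
}

func main() {
	t := toToken(tokenJSON{AccessToken: "a", TokenType: "Bearer", ExpiresIn: 3600})
	fmt.Println(t.Valid(), t.Expiry.After(time.Now()))
}
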
// v2config configures a jottacloud backend using the modern JottaCli token-based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
}
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
@@ -602,7 +399,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
@@ -651,7 +448,7 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
// Jottacloud want's '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
}
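
A usage sketch, assuming the standard library's url.PathEscape stands in for rclone's rest.URLPathEscape (which works per path segment); the extra Replace turns the '+' that PathEscape leaves literal into %2B.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// urlPathEscape mirrors the helper above on top of net/url.
func urlPathEscape(in string) string {
	return strings.Replace(url.PathEscape(in), "+", "%2B", -1)
}

func main() {
	fmt.Println(url.PathEscape("a+b c.txt")) // a+b%20c.txt  ('+' kept literal)
	fmt.Println(urlPathEscape("a+b c.txt"))  // a%2Bb%20c.txt ('+' escaped for Jottacloud)
}
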
@@ -661,36 +458,14 @@ func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}
// filePath returns an escaped file path (f.root, file)
// filePath returns a escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if v1tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
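
SetRequestFilter is rclone's transport hook (wired up further down in NewFs); here is a generic, hedged sketch of the same body-rewrite idea using a plain http.RoundTripper instead:

package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"strings"
)

// upcaseGrantType rewrites the form body of an outgoing request,
// turning grant_type=refresh_token into grant_type=REFRESH_TOKEN.
type upcaseGrantType struct{ next http.RoundTripper }

func (t upcaseGrantType) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Body != nil {
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, err
		}
		_ = req.Body.Close()
		body = []byte(strings.Replace(string(body), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
		req.Body = ioutil.NopCloser(bytes.NewReader(body)) // same length, so ContentLength still holds
	}
	return t.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: upcaseGrantType{http.DefaultTransport}}
	_, _ = client.PostForm("https://example.com/token", nil)
}
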
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -699,56 +474,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == v1configVersion)
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(ctx)
if ver == configVersion {
oauthConfig.ClientID = "jottacli"
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == v1configVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
// Create OAuth Client
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
@@ -762,25 +510,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: false,
}).Fill(ctx, f)
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile {
err = nil
}
return err
})
@@ -896,13 +638,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}
if bool(result.Deleted) && !f.opt.TrashedOnly {
if result.Deleted {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if !f.opt.TrashedOnly && bool(item.Deleted) {
if item.Deleted {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
@@ -912,14 +654,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i := range result.Files {
item := &result.Files[i]
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
@@ -1123,8 +859,8 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// copyOrMoves copies or moves directories or files depending on the method parameter
@@ -1148,7 +884,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
return info, nil
}
// Copy src to this remote using server-side copy operations.
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -1178,7 +914,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//return f.newObjectWithInfo(remote, &result)
}
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -1209,7 +945,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1251,14 +987,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(remote),
Parameters: url.Values{},
}
if unlink {
if f.opt.Unlink {
opts.Parameters.Set("mode", "disableShare")
} else {
opts.Parameters.Set("mode", "enableShare")
@@ -1278,12 +1014,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
}
if err != nil {
if unlink {
if f.opt.Unlink {
return "", errors.Wrap(err, "couldn't remove public link")
}
return "", errors.Wrap(err, "couldn't create public link")
}
if unlink {
if f.opt.Unlink {
if result.PublicSharePath != "" {
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
}
@@ -1313,22 +1049,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1354,7 +1074,7 @@ func (o *Object) Remote() string {
return o.remote
}
// filePath returns an escaped file path (f.root, remote)
// filePath returns a escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
@@ -1402,7 +1122,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
if info.Deleted {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1459,7 +1179,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
// we need an MD5
// we need a MD5
md5Hasher := md5.New()
// use the teeReader to write to the local file AND calculate the MD5 while doing so
teeReader := io.TeeReader(in, md5Hasher)
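
A minimal sketch of the TeeReader trick (spooling and the size threshold omitted): every byte the consumer reads from the tee is also written into the hasher, so the MD5 is ready as soon as the copy finishes.

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	src := strings.NewReader("hello jottacloud")
	md5Hasher := md5.New()
	teeReader := io.TeeReader(src, md5Hasher) // reads from src also feed md5Hasher
	n, err := io.Copy(os.Stdout, teeReader)
	fmt.Printf("\ncopied %d bytes, err=%v, md5=%x\n", n, err, md5Hasher.Sum(nil))
}
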
@@ -1517,8 +1237,6 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {
@@ -1541,7 +1259,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1565,7 +1282,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the file state is INCOMPLETE and CORRUPT, try to upload again
if response.State != "COMPLETED" {
// If the file state is INCOMPLETE and CORRPUT, try to upload again
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -1638,7 +1355,6 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -79,7 +79,7 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
}
// An Fs is a representation of a remote Koofr Fs
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
@@ -250,13 +250,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from an Fs root relative path,
// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
@@ -267,7 +267,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(ctx)
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
@@ -287,7 +287,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
@@ -421,7 +421,7 @@ func translateErrorsObject(err error) error {
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
// neccessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
@@ -603,7 +603,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)


@@ -4,7 +4,6 @@ package local
import (
"context"
"os"
"syscall"
"github.com/pkg/errors"
@@ -16,9 +15,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert


@@ -1,4 +1,4 @@
// +build windows plan9 js
// +build windows plan9
package local


@@ -1,4 +1,4 @@
// +build !windows,!plan9,!js
// +build !windows,!plan9
package local


@@ -10,6 +10,7 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@@ -40,7 +41,6 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -70,20 +70,6 @@ points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead)
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -101,26 +87,9 @@ Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.
If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
appended to it (e.g. a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.
If the file is being modified throughout (not just appended to) then
the transfer may fail with a hash check failure.
In detail, once the file has had stat() called on it for the first
time we:
- Only transfer the size that stat gave
- Only checksum the size that stat gave
- Don't update the stat info for the file
`,
check can be disabled with this flag.`,
Default: false,
Advanced: true,
}, {
@@ -148,38 +117,6 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -195,16 +132,12 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -221,22 +154,20 @@ type Fs struct {
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
objectMetaMu sync.RWMutex // global lock for Object metadata
lstat func(name string) (os.FileInfo, error)
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
// When using these items the fs.objectMetaMu must be held
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
// these are read only and don't need the mutex held
translatedLink bool // Is this object a translated link
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
}
// ------------------------------------------------------------
@@ -244,7 +175,7 @@ type Object struct {
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -271,8 +202,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
}).Fill(ctx, f)
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
@@ -483,8 +413,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
@@ -581,10 +511,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
if f.opt.NoSetModTime {
return fs.ModTimeNotSupported
}
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
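
precisionOk is a sync.Once, so the probe runs at most once even under concurrent callers; a hedged sketch of the pattern (the probe body is a stand-in for the real readPrecision):

package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedPrecision struct {
	once      sync.Once
	precision time.Duration
}

// Precision computes the value on first use and returns the cached
// result thereafter; sync.Once makes this safe across goroutines.
func (c *cachedPrecision) Precision() time.Duration {
	c.once.Do(func() {
		c.precision = time.Millisecond // stand-in for the real filesystem probe
	})
	return c.precision
}

func main() {
	c := &cachedPrecision{}
	fmt.Println(c.Precision(), c.Precision())
}
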
@@ -643,28 +569,23 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
func (f *Fs) Purge(ctx context.Context) error {
fi, err := f.lstat(f.root)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(dir)
return os.RemoveAll(f.root)
}
// Move src to this remote using server-side move operations.
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -682,9 +603,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Temporary Object under construction
dstObj := f.newObject(remote)
dstObj.fs.objectMetaMu.RLock()
dstObjMode := dstObj.mode
dstObj.fs.objectMetaMu.RUnlock()
// Check it is a file if it exists
err := dstObj.lstat()
@@ -692,7 +610,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// OK
} else if err != nil {
return nil, err
} else if !dstObj.fs.isRegular(dstObjMode) {
} else if !dstObj.fs.isRegular(dstObj.mode) {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -728,7 +646,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -779,50 +697,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -846,31 +720,19 @@ func (o *Object) Remote() string {
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime
oldsize := o.size
o.fs.objectMetaMu.RUnlock()
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
o.fs.objectMetaMu.RLock()
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
o.fs.objectMetaMu.RUnlock()
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectMetaMu.RLock()
o.fs.objectHashesMu.Lock()
hashes := o.hashes
hashValue, hashFound := o.hashes[r]
o.fs.objectMetaMu.RUnlock()
o.fs.objectHashesMu.Unlock()
if changed || !hashFound {
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
var in io.ReadCloser
if !o.translatedLink {
@@ -882,14 +744,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
} else {
in, err = o.openTranslatedLink(0, -1)
}
// If not checking for updates, only read size given
if o.fs.opt.NoCheckUpdated {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
@@ -899,36 +756,29 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()
o.fs.objectHashesMu.Lock()
if o.hashes == nil {
o.hashes = hashes
} else {
o.hashes[r] = hashValue
}
o.fs.objectMetaMu.Unlock()
o.fs.objectHashesMu.Unlock()
}
return hashValue, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
@@ -944,9 +794,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
o.fs.objectMetaMu.RLock()
mode := o.mode
o.fs.objectMetaMu.RUnlock()
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
@@ -979,15 +827,11 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
if file.o.size != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
}
}
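
A hedged sketch of the guard above: a wrapper re-stats the file on each Read and fails fast when the size or mtime drifts from the values captured at open, so a file modified mid-transfer aborts instead of producing a torn upload.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

type checkedFile struct {
	f       *os.File
	size    int64
	modTime int64 // UnixNano at the time the transfer started
}

func (c *checkedFile) Read(p []byte) (int, error) {
	fi, err := c.f.Stat()
	if err != nil {
		return 0, err
	}
	if fi.Size() != c.size || fi.ModTime().UnixNano() != c.modTime {
		return 0, fmt.Errorf("can't copy - source file is being updated")
	}
	return c.f.Read(p)
}

func main() {
	f, err := os.Open("testfile.txt") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	r := &checkedFile{f: f, size: fi.Size(), modTime: fi.ModTime().UnixNano()}
	_, err = io.Copy(ioutil.Discard, r)
	fmt.Println(err)
}
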
@@ -1004,9 +848,9 @@ func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectMetaMu.Lock()
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectMetaMu.Unlock()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
@@ -1031,7 +875,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
@@ -1046,13 +890,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
// If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload.
if limit < 0 && o.fs.opt.NoCheckUpdated {
limit = o.size
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
@@ -1139,12 +976,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
}
if !o.fs.opt.NoPreAllocate {
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
@@ -1191,9 +1026,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// All successful so update the hashes
if hasher != nil {
o.fs.objectMetaMu.Lock()
o.fs.objectHashesMu.Lock()
o.hashes = hasher.Sums()
o.fs.objectMetaMu.Unlock()
o.fs.objectHashesMu.Unlock()
}
// Set the mtime
@@ -1206,8 +1041,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.lstat()
}
var sparseWarning sync.Once
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
@@ -1231,50 +1064,29 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
if !f.opt.NoPreAllocate {
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
if !f.opt.NoSparse && file.SetSparseImplemented {
sparseWarning.Do(func() {
fs.Infof(nil, "Writing sparse files: use --local-no-sparse or --multi-thread-streams 0 to disable")
})
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}
return out, nil
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// if not checking updated then don't update the stat
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
return
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
o.fs.objectMetaMu.Lock()
o.size = info.Size()
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat an Object into info
// Stat a Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
@@ -1303,7 +1115,7 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
s = uncPath(s)
}
return s
}
@@ -1317,6 +1129,28 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(l string) string {
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
}
return l
}
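
A usage sketch of the conversion above (paths invented): the regexp matches plain drive-letter absolutes, while already-long and relative paths pass through untouched.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

func uncPath(l string) string {
	if strings.HasPrefix(l, `\\`) {
		if strings.HasPrefix(l, `\\?\`) {
			return l // already a long path
		}
		return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
	}
	if isAbsWinDrive.MatchString(l) {
		return `\\?\` + l
	}
	return l
}

func main() {
	fmt.Println(uncPath(`c:\Users\me\file.txt`)) // \\?\c:\Users\me\file.txt
	fmt.Println(uncPath(`\\server\share\file`))  // \\?\UNC\server\share\file
	fmt.Println(uncPath(`\\?\c:\already\long`))  // unchanged
	fmt.Println(uncPath(`relative\path`))        // unchanged - not absolute
}
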
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -1324,7 +1158,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)


@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -163,6 +172,6 @@ func TestSymlinkError(t *testing.T) {
"links": "true",
"copy_links": "true",
}
_, err := NewFs(context.Background(), "local", "/", m)
_, err := NewFs("local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}


@@ -0,0 +1,10 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}


@@ -1,6 +1,6 @@
//+build linux
package file
package local
import (
"os"
@@ -18,12 +18,8 @@ var (
fallocFlagsIndex int32
)
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = true
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -48,12 +44,3 @@ again:
// }
return err
}
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = false
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
return nil
}

Some files were not shown because too many files have changed in this diff.